# Copyright (c) Facebook, Inc. and its affiliates.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except ImportError:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
    def backward(ctx, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
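# Usage sketch (illustrative only, not part of the original API): sample 128 points from a
# (B, N, 3) cloud with furthest point sampling. Requires the compiled `_ext` CUDA extension
# and a CUDA device; shapes follow the docstring above.
def _example_furthest_point_sample():
    xyz = torch.randn(2, 1024, 3).cuda()    # (B, N, 3) coordinates
    return furthest_point_sample(xyz, 128)  # (B, 128) indices, non-differentiable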
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
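# Usage sketch (illustrative): combine furthest point sampling with gather_operation to get
# the coordinates of the sampled points, mirroring the pattern used by the set-abstraction
# modules. Assumes a CUDA build of `_ext`.
def _example_gather_operation():
    xyz = torch.randn(2, 1024, 3).cuda()            # (B, N, 3)
    inds = furthest_point_sample(xyz, 128)          # (B, npoint)
    xyz_flipped = xyz.transpose(1, 2).contiguous()  # (B, 3, N)
    return gather_operation(xyz_flipped, inds)      # (B, 3, npoint)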
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
            (B, n, 3) tensor of unknown features
        known : torch.Tensor
            (B, m, 3) tensor of known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
        Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
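# Usage sketch (illustrative): interpolate features from m "known" points onto n "unknown"
# points with inverse-distance weights over the 3 nearest neighbors, mirroring
# PointnetFPModule in pointnet2_modules.py. Assumes a CUDA build of `_ext`.
def _example_three_interpolate():
    unknown = torch.randn(2, 2048, 3).cuda()      # (B, n, 3) query positions
    known = torch.randn(2, 512, 3).cuda()         # (B, m, 3) source positions
    known_feats = torch.randn(2, 64, 512).cuda()  # (B, C, m) source features
    dist, idx = three_nn(unknown, known)          # (B, n, 3) distances and indices
    dist_recip = 1.0 / (dist + 1e-8)
    weight = dist_recip / torch.sum(dist_recip, dim=2, keepdim=True)  # rows sum to 1
    return three_interpolate(known_feats, idx, weight)  # (B, C, n)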
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
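# Usage sketch (illustrative): find up to nsample neighbor indices within a fixed radius of
# each query center, then group their coordinates. Assumes a CUDA build of `_ext`.
def _example_ball_query():
    xyz = torch.randn(2, 1024, 3).cuda()            # (B, N, 3) all points
    new_xyz = xyz[:, :128, :].contiguous()          # (B, npoint, 3) query centers
    idx = ball_query(0.2, 32, xyz, new_xyz)         # (B, npoint, nsample)
    xyz_flipped = xyz.transpose(1, 2).contiguous()  # (B, 3, N)
    return grouping_operation(xyz_flipped, idx)     # (B, 3, npoint, nsample)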
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
        # type: (QueryAndGroup, float, int, bool, bool, bool, bool, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
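# Usage sketch (illustrative): QueryAndGroup as used by the set-abstraction modules; with
# use_xyz=True and C-channel features the grouped output has 3 + C channels.
def _example_query_and_group():
    grouper = QueryAndGroup(radius=0.2, nsample=32, use_xyz=True)
    xyz = torch.randn(2, 1024, 3).cuda()        # (B, N, 3)
    new_xyz = xyz[:, :128, :].contiguous()      # (B, npoint, 3) centers
    features = torch.randn(2, 64, 1024).cuda()  # (B, C, N)
    return grouper(xyz, new_xyz, features)      # (B, 3 + 64, 128, 32)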
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
# type: (GroupAll, bool) -> None
        super(GroupAll, self).__init__()
        self.use_xyz = use_xyz
        self.ret_grouped_xyz = ret_grouped_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
| 3detr-main | third_party/pointnet2/pointnet2_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
''' Testing customized ops. '''
import torch
from torch.autograd import gradcheck
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
def test_interpolation_grad():
batch_size = 1
feat_dim = 2
m = 4
feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
def interpolate_func(inputs):
idx = torch.from_numpy(np.array([[[0,1,2],[1,2,3]]])).int().cuda()
weight = torch.from_numpy(np.array([[[1,1,1],[2,2,2]]])).float().cuda()
interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
return interpolated_feats
assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
if __name__=='__main__':
test_interpolation_grad()
| 3detr-main | third_party/pointnet2/pointnet2_test.py |
# Copyright (c) Facebook, Inc. and its affiliates.
''' Pointnet2 layers.
Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
Extended with the following:
1. Uniform sampling in each local region (sample_uniformly)
2. Return sampled points indices to support votenet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
npoint: int,
radii: List[float],
nsamples: List[int],
mlps: List[List[int]],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True
):
super().__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz
)
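# Usage sketch (illustrative): a single-scale set-abstraction layer that reduces 1024 points
# to 128 centers with a [64 -> 128] shared MLP (the first width is grown by 3 internally
# because use_xyz defaults to True). Requires a CUDA device and the compiled extension.
def _example_sa_module():
    sa = PointnetSAModule(mlp=[64, 128], npoint=128, radius=0.2, nsample=32).cuda()
    xyz = torch.randn(2, 1024, 3).cuda()        # (B, N, 3)
    features = torch.randn(2, 64, 1024).cuda()  # (B, C, N)
    new_xyz, new_features = sa(xyz, features)   # (B, 128, 3), (B, 128, 128)
    return new_xyz, new_features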
class PointnetSAModuleVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True,
pooling: str = 'max',
sigma: float = None, # for RBF pooling
            normalize_xyz: bool = False, # normalize local XYZ with radius
sample_uniformly: bool = False,
ret_unique_cnt: bool = False
):
super().__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.pooling = pooling
self.mlp_module = None
self.use_xyz = use_xyz
self.sigma = sigma
if self.sigma is None:
self.sigma = self.radius/2
self.normalize_xyz = normalize_xyz
self.ret_unique_cnt = ret_unique_cnt
if npoint is not None:
self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz,
sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
else:
self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
mlp_spec = mlp
if use_xyz and len(mlp_spec)>0:
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None,
inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
else:
assert(inds.shape[1] == self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
if not self.ret_unique_cnt:
grouped_features, grouped_xyz = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample)
else:
grouped_features, grouped_xyz, unique_cnt = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
new_features = self.mlp_module(
grouped_features
) # (B, mlp[-1], npoint, nsample)
if self.pooling == 'max':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'avg':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'rbf':
# Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
# Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
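            # i.e. rbf[b, p, s] = exp(-||grouped_xyz[b, :, p, s]||^2 / (2 * sigma^2)): one weight per grouped neighbor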
rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1,keepdim=False) / (self.sigma**2) / 2) # (B, npoint, nsample)
new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
if not self.ret_unique_cnt:
return new_xyz, new_features, inds
else:
return new_xyz, new_features, inds, unique_cnt
class PointnetSAModuleMSGVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlps: List[List[int]],
npoint: int,
radii: List[float],
nsamples: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None, inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, *, mlp: List[int], bn: bool = True):
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*known_feats.size()[0:2], unknown.size(1)
)
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats],
dim=1) #(B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
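# Usage sketch (illustrative): propagate 128-channel features from 256 coarse points back
# onto 1024 fine points and fuse them with the existing 64-channel features; the first MLP
# width must equal C2 + C1. Requires a CUDA device and the compiled extension.
def _example_fp_module():
    fp = PointnetFPModule(mlp=[128 + 64, 128]).cuda()
    unknown = torch.randn(2, 1024, 3).cuda()        # (B, n, 3) fine positions
    known = torch.randn(2, 256, 3).cuda()           # (B, m, 3) coarse positions
    unknow_feats = torch.randn(2, 64, 1024).cuda()  # (B, C1, n)
    known_feats = torch.randn(2, 128, 256).cuda()   # (B, C2, m)
    return fp(unknown, known, unknow_feats, known_feats)  # (B, 128, n)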
class PointnetLFPModuleMSG(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
learnable feature propagation layer.'''
def __init__(
self,
*,
mlps: List[List[int]],
radii: List[float],
nsamples: List[int],
post_mlp: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
sample_uniformly=sample_uniformly)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
r""" Propagate features from xyz1 to xyz2.
Parameters
----------
xyz2 : torch.Tensor
(B, N2, 3) tensor of the xyz coordinates of the features
xyz1 : torch.Tensor
(B, N1, 3) tensor of the xyz coordinates of the features
features2 : torch.Tensor
            (B, C2, N2) tensor of the descriptors of the features
        features1 : torch.Tensor
            (B, C1, N1) tensor of the descriptors of the features
Returns
-------
new_features1 : torch.Tensor
(B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz1, xyz2, features1
) # (B, C1, N2, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], N2, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], N2, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], N2)
if features2 is not None:
new_features = torch.cat([new_features, features2],
dim=1) #(B, mlp[-1] + C2, N2)
new_features = new_features.unsqueeze(-1)
new_features = self.post_mlp(new_features)
new_features_list.append(new_features)
return torch.cat(new_features_list, dim=1).squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(
torch.cuda.FloatTensor(*new_features.size()).fill_(1)
)
print(new_features)
print(xyz.grad)
| 3detr-main | third_party/pointnet2/pointnet2_modules.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from contextual.contextual_models import *
from contextual.contextual_linucb import *
from tqdm import trange
from collections import namedtuple
from cycler import cycler
import matplotlib.pyplot as plt
import cvxpy as cp
import numpy as np
from scipy.linalg import sqrtm
from scipy.optimize import minimize
from tqdm import trange, tqdm
from scipy.optimize import linprog
from joblib import Parallel, delayed
def work(m, rad, nb_arms, nb_features, noise, nb_simu, T, all_algs, random_state, M=1, bound_context=1):
# create model
K = nb_arms
model = AttackOneUserModel(n_actions=K, n_features=nb_features, noise=noise, random_state=seed, bound_context=bound_context
, distance=rad)
theta_bound = np.max(np.linalg.norm(model.thetas, axis=1))
target_arm = model.n_actions - 1
target_context = np.random.randint(low=0, high=len(model.context_lists))
x_star = model.context_lists[target_context]
mask = np.ones(model.n_actions, dtype='int')
mask[target_arm] = 0
print(in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])))
if in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])):
raise ValueError()
AAA = []
for alg_name in tqdm(all_algs.keys(), desc='Sim. model {}'.format(m)):
args = {'nb_arms': model.n_actions,
'dimension': model.n_features,
'bound_features': theta_bound,
'bound_context': model.bound_context,
'reg_factor': 0.1,
'delta': 0.01,
'noise_variance': noise,
}
alg = all_algs[alg_name](**args)
regret = np.zeros((nb_simu, T))
        draws = [[] for _ in range(nb_simu)]  # independent lists; [[]] * nb_simu would make every entry alias the same list
epsilon_norm = np.zeros((nb_simu, T))
        for k in trange(nb_simu, desc='Number of simulations'):
alg.reset()
for t in trange(T, desc='Iteration'):
context = model.get_context()
old_context = context
if 'Attacked' in alg_name:
if np.linalg.norm(context - x_star) < 10 ** -10:
if 'Relaxed' in alg_name:
epsilon = compute_relaxed_attack(alg, target_arm, context, slack=10 ** -4)
else:
epsilon = compute_attack(alg, target_arm, context, slack=10 ** -3)
else:
epsilon = np.zeros((model.n_features,))
context = context + epsilon
epsilon_norm[k, t] = np.linalg.norm(epsilon)
a_t = alg.get_action(context)
if np.linalg.norm(x_star - old_context) < 10 ** -10:
draws[k].append(1*(a_t == target_arm))
r_t = model.reward(old_context, a_t)
alg.update(context, a_t, r_t)
regret[k, t] = model.best_arm_reward(old_context) - np.dot(model.thetas[a_t], old_context)
draws[k] = np.cumsum(draws[k])[-1]
draws = np.array(draws)
AAA += [(alg_name, {"regret": regret, "attack_cond": epsilon_norm, "target_draws": draws})]
return m, AAA, model, rad
def in_hull(points, x):
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T, np.ones((1, n_points))]
b = np.r_[x, np.ones(1)]
lp = linprog(c, A_eq=A, b_eq=b)
return lp.success
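# Quick sanity check of the LP-based hull test above (illustrative, not used elsewhere): a
# point inside the triangle spanned by `points` is a feasible convex combination, a
# far-away point is not.
def _example_in_hull():
    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    inside = in_hull(points=points, x=np.array([0.25, 0.25]))  # expected True
    outside = in_hull(points=points, x=np.array([2.0, 2.0]))   # expected False
    return inside, outside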
def compute_relaxed_attack(alg, a_star, x_star, slack=10**-10):
d = alg.n_features
delta = cp.Variable(d)
obj = cp.Minimize(cp.quad_form(delta, np.eye(d))/2)
theta_a_star = alg.thetas_hat[a_star]
betas = alg.alpha()
constraints = []
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
                temp_constraint = (theta - theta_a_star)@(x_star+delta) + betas[a]*cp.norm(P@(x_star+delta))
                # - betas[a_star] * (cp.norm(P_a_star @ x_star) + (alg.inv_design_matrices[a] @ x_star) @
                #   delta/cp.norm(P_a_star @ x_star))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
prob.solve(verbose=False, max_iters=1000, feastol=10**-8)
epsilon = delta.value
# print('epsilon =', epsilon)
# for a in range(len(alg.thetas_hat)):
# if a != a_star:
# theta_a_star = alg.thetas_hat[a_star]
# betas = alg.alpha()
# theta = alg.thetas_hat[a]
# P = sqrtm(alg.inv_design_matrices[a])
# P_a_star = sqrtm(alg.inv_design_matrices[a_star])
# print('The constraint for arm {}:'.format(a), np.dot(theta - theta_a_star, (x_star+epsilon)) + betas[a]*np.linalg.norm(P.dot((x_star+epsilon))) \
# - betas[a_star] * (np.linalg.norm(P_a_star.dot(x_star)) +
# np.dot((alg.inv_design_matrices[a].dot(x_star)), epsilon)/np.linalg.norm(P_a_star.dot(x_star))))
except:
print('Exception')
epsilon = np.zeros((d,))
# print('epsilon =', epsilon)
if epsilon is None:
return np.zeros((d,))
return epsilon
def compute_attack(alg, a_star, x_star, slack=10 **-10):
d = alg.n_features
func = lambda delta: np.linalg.norm(delta)/2
theta_a_star = alg.thetas_hat[a_star]
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
betas = alg.alpha()
constraints_list = []
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta)))
- beta_a_star * np.linalg.norm(P_a_star.dot((x_star + delta))) + slack)
temp_cons = {'type': 'ineq', 'fun': temp_constraint}
constraints_list.append(temp_cons)
cons = tuple(constraints_list)
res = minimize(func, -x_star, method='SLSQP', constraints=cons)
# print(res.message)
try:
epsilon = res.x
except:
epsilon = np.zeros((d,))
if epsilon is None:
return np.zeros((d,))
return epsilon
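# Note on the two attack routines above: both look for a minimal-norm perturbation `delta`
# of the target context x_star such that, for every arm a != a_star,
#     theta_hat_a . (x_star + delta) + beta_a * ||P_a (x_star + delta)||
#         <= theta_hat_a_star . (x_star + delta) + beta_a_star * ||P_a_star (x_star + delta)|| - slack,
# where P_a = sqrtm(inv_design_matrices[a]). compute_attack tackles this non-convex program
# directly with SLSQP, while compute_relaxed_attack drops the concave -beta_a_star * ||.||
# term (the commented-out part) to obtain a convex problem solved with cvxpy.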
n = 10 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
# linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
# plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
nb_radius = 4
radius = np.linspace(1/10, 1/2, nb_radius)
#radius = np.array([1/4, 1/10])
T = int(3*10**3)
nb_simu = 4
nb_arms = 9
n_features = 10
results = []
la = 0.1
parallel = True
algorithms = {
'LinUCB': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=0.1, delta=0.01, noise_variance=noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
'LinUCB RelaxedAttacked': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=0.01,
noise_variance=noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
# 'LinUCB-Attacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
# reg_factor=0.1, delta=0.99,
# bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
# noise_variance=noise, bound_context=model.bound_context),
# 'LinUCB-RelaxedAttacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
# reg_factor=0.1, delta=0.01,
# bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
# noise_variance=noise, bound_context=model.bound_context)
}
if parallel:
import multiprocessing
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(m=i, rad=r, nb_arms=nb_arms, nb_features = n_features, noise = noise,
nb_simu=nb_simu, T=T, all_algs=algorithms, random_state=0, M=1, bound_context=1)
for i, r in enumerate(radius))
else:
for i, r in enumerate(radius):
ret = work(m=i, rad=r, nb_arms=nb_arms, nb_features = n_features, noise = noise,
nb_simu=nb_simu, T=T, all_algs=algorithms, random_state=0, M=1, bound_context=1)
results.append(ret)
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
for alg_name, res in results[0][1]:
algorithms[alg_name] = {'draws': np.zeros((nb_radius, nb_simu))}
for m in range(len(radius)):
res = results[m][1]
for i, val in enumerate(res):
alg_name = val[0]
val = val[1]
algorithms[alg_name]['draws'][m] = np.array(val['target_draws'])
import numpy as np
plt.figure(1, figsize=(8, 8))
t = np.linspace(0, T-1, T, dtype='int')
for alg_name, res in algorithms.items():
res['draws'] = np.array(res['draws'])
mean_draws = np.mean(res['draws'], axis=(1))
low_quantile = np.quantile(res['draws'], 0.1, axis=(1))
high_quantile = np.quantile(res['draws'], 1 - 0.1, axis=(1))
plt.plot(radius, mean_draws, label=alg_name)
plt.fill_between(radius, low_quantile, high_quantile, alpha=0.15)
plt.title('Number of target draws at T={}'.format(T))
print(mean_draws)
plt.legend()
plt.show()
# if n_features == 2:
# for i, (alg_name, val) in enumerate(results):
# plt.figure(i + 3)
# plt.title('Confidence ellipse for {}'.format(alg_name))
# x = np.linspace(0, 2*np.pi)
# x_1 = np.cos(x)
# y_1 = np.sin(x)
# X = np.vstack((x_1, y_1))
# betas = val.betas
# for a in range(model.n_actions):
# center = val.thetas[-1, -1, a]
# V = sqrtm(val.design_matrix[a])
# y = center.reshape((2, 1)) + betas[a] * np.dot(V, X)
# plt.plot(y[0, :], y[1, :], label = 'confidence ellipse arm {}'.format(a))
# plt.fill_between(y[0,:], y[1,:], (center.reshape((2, 1))*np.ones((2, 50)))[1, :], alpha=0.15)
# plt.scatter(model.thetas[a][0],model.thetas[a][1], c=new_colors[a])
# plt.scatter(center[0], center[1], marker='^', c=new_colors[a])
# plt.scatter(x_star[0], x_star[1], marker='+', c = new_colors[-1])
# plt.legend()
# plt.show()
#
# plt.figure(4)
# plt.title('Error true env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(7)
# plt.title('Error biased env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a]/np.maximum(2*np.dot(model.thetas[a], x_star)/np.dot(model.thetas[target_arm], x_star), 1), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(5)
# plt.title('Difference estimated reward for target context {}'.format(target_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, target_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(6)
# plt.title('Difference estimated reward for a random non target context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/test_distance_attack_one_user.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
import isoexp.contextual.contextual_models as arms
from isoexp.contextual.contextual_linucb import *
from tqdm import tqdm
from cycler import cycler
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import math
import pickle
from tqdm import trange
import json
import datetime
from collections import namedtuple
import re
import os
frequency = 100
class exp(object):
def __init__(self, nb_arms, type='random', a_star=0, m=None):
self.K = nb_arms
self.type = type
self.a_star = a_star
self.m = m
def get_action(self, context):
if self.type == 'random':
return np.ones((self.K,)) / self.K
elif self.type == 'optimal':
means = np.dot(self.m.thetas, context)
a = np.argmax(means)
proba = np.zeros((self.K,))
proba[a] = 1
return proba
else:
proba = np.zeros((self.K,))
proba[self.a_star] = 1
return proba
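# The Exp4 experts defined above: type 'random' plays all arms uniformly, type 'optimal'
# plays the arm with the highest mean reward for the given context, and any other type
# deterministically plays the fixed arm `a_star`.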
def work(m, nb_arms, nb_features, noise, nb_simu, T, all_algs, random_state, M=1, bound_context=1, dataset=False, which=None):
# create model
K = nb_arms
if dataset:
if which == 'jester':
arm_file = os.path.abspath("examples/jester/Vt_jester.csv")
user_file = os.path.abspath("examples/jester/U.csv")
model = arms.DatasetModel(arm_csvfile=arm_file, user_csvfile=user_file, noise=noise, random_state=random_state)
else:
arm_file = os.path.abspath('examples/movielens/Vt_movielens.csv')
user_file = os.path.abspath('examples/movielens/U.csv')
model = arms.DatasetModel(arm_csvfile=arm_file, user_csvfile=user_file, noise=noise, random_state=random_state, arms_limit=25)
else:
model = arms.RandomContextualLinearArms(n_actions=K, n_features=nb_features, noise=noise,
random_state=random_state, bound_context=bound_context)
theta_bound = np.max(np.linalg.norm(model.thetas, axis=1))
target_context = np.random.randint(low=0, high=len(model.context_lists))
other_context = np.random.randint(low=0, high=len(model.context_lists))
# while other_context == target_context:
# other_context = np.random.randint(low=0, high=len(model.context_lists))
target_arm = np.random.randint(low=0, high=model.n_actions)
AAA = []
for alg_name in tqdm(all_algs.keys(), desc='Sim. model {}'.format(m)):
args = {'nb_arms': model.n_actions,
'dimension': model.n_features,
'bound_features': theta_bound,
'bound_context': model.bound_context,
'reg_factor': 0.1,
'delta': delta,
'noise_variance': noise,
}
if 'Exp4' in alg_name:
eta = np.sqrt(2 * np.log(M) / (T * model.n_actions))
experts = []
for i in range(M - 2):
experts.append(exp(nb_arms=model.n_actions, type='random'))
experts.append(exp(nb_arms=model.n_actions, type='optimal', m=model))
experts.append(exp(nb_arms=model.n_actions, type='', a_star=int(target_arm)))
args['experts'] = experts
args['eta'] = eta
alg = all_algs[alg_name](**args)
if 'attacked' in alg_name:
if 'gamma' in alg_name:
temp_eps = re.findall(r'[\d\.\d]+', alg_name)
temp_eps = np.array(list(map(float, temp_eps)))
temp_eps = temp_eps[temp_eps<=1]
temp_eps = temp_eps[0]
temp_args = args.copy()
temp_args['eps'] = temp_eps
attacker = RewardAttacker(**temp_args)
regret = np.zeros((nb_simu, T//frequency)) #[[]] * nb_simu #np.zeros((nb_simu, T))
draws = regret.copy()
epsilon_norm = np.zeros((nb_simu, T//frequency)) #[[]] * nb_simu #np.zeros((nb_simu, T))
# thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
# prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n))
rewards_range = np.zeros((nb_simu, T//frequency)) #[[]] * nb_simu # np.zeros((nb_simu, T))
for k in range(nb_simu):
alg.reset()
if 'attacked' in alg_name and not 'stationary' in alg_name:
attacker.reset()
attack_acumulator = 0
regret_accumulator = 0
rewards_range_max = 0
draws_accumulator = 0
for t in trange(T):
context = model.get_context()
a_t = alg.get_action(context)
r_t = model.reward(context, a_t)
if 'attacked' in alg_name:
if not 'stationary' in alg_name:
attacker.update(context, a_t, r_t)
attack_t = attacker.compute_attack(a_t, context, target_arm)
else:
if a_t != target_arm:
attack_t = -r_t + noise*np.random.randn()
else:
attack_t = 0
# print('attack_t =', attack_t)
else:
attack_t = 0
alg.update(context, a_t, min(1, max(0, r_t+attack_t)))
attack_acumulator+= np.abs(attack_t)
regret_accumulator+= model.best_arm_reward(context) - np.dot(model.thetas[a_t], context)
rewards_range_max = max(rewards_range_max, min(1, max(r_t + attack_t, 0)))
draws_accumulator +=1 if a_t == target_arm else 0
if t % frequency == 0: # logging
epsilon_norm[k, t // frequency]= attack_acumulator
regret[k, t // frequency]= regret_accumulator
rewards_range[k, t // frequency]= rewards_range_max
draws[k, t // frequency]= draws_accumulator
attack_acumulator = 0
regret_accumulator = 0
rewards_range_max = 0
draws_accumulator = 0
# print('reward = ', min(1, max(r_t + attack_t, 0)))
# print('Target arm =', target_arm, 'a_t =', a_t)
# alg.update(context, a_t, r_t + attack_t)
# if hasattr(alg, 'thetas_hat'):
# thetas_alg[k, t] = alg.thetas_hat
# for a in range(model.n_actions):
# for i, x in enumerate(model.context_lists):
# if 'attacked' in alg_name:
# p = np.dot(alg.thetas_hat[a], x) - (1 - attacker.eps) * np.dot(model.thetas[target_arm], x)
# else:
# p = np.dot(alg.thetas_hat[a], x) - np.dot(model.thetas[target_arm], x)
# prod_scalar[k, t, a, i] = p
# print('-'*100)
# print('r_t =', r_t)
# print('atttack_t =', attack_t)
# print('r_t + attack_t = ', r_t + attack_t)
# rewards_range[k, t] = min(1, max(r_t + attack_t, 0))
AAA += [(alg_name, {"regret": regret, "attack_cond": epsilon_norm, "target_draws": draws, "thetas": (),
"prod_scalar": (), "range_rewards": rewards_range})]
return m, AAA, model, target_arm
def run_and_output(dataset=None):
results = []
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(m=m, nb_arms=K, nb_features=n_features, noise=a_noise,
nb_simu=nb_simu, T=T, all_algs=algorithms,
random_state=random_state + m, M=M, which=dataset) for m in range(nb_models))
else:
for m in tqdm(range(nb_models)):
ret = work(m, K, n_features, a_noise, nb_simu, T, algorithms, random_state + m, M=M)
results.append(ret)
id = '{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now())
pickle_name = "{}_{}_{}_contextual_attacks_rewards.pickle".format(dataset, id, "PAR" if PARALLEL else "SEQ")
print(pickle_name)
with open(pickle_name, "wb") as f:
pickle.dump(results, f)
with open("{}_{}_{}_contextual_attacks_rewards_settings.json".format(dataset, id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
return results, pickle_name, id,
if __name__ == '__main__':
PARALLEL = False
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, prod_scalar')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
print("seed: {}".format(random_state))
K = 10
n_features = 30
a_noise = 0.1
delta = 0.01
la = 0.1
T = 1*10**6 # horizon
nb_models = 5
nb_simu = 25
M = 5
# attack_parameter_to_test = np.linspace(0, 1, 10)
attack_parameter_to_test = np.array([1/2])
settings = {
"T": T,
"nb_simu": nb_simu,
"nb_models": nb_models,
"random_state": random_state,
"K": K,
"dimension": n_features,
"epsilon_tested": list(attack_parameter_to_test),
'frequency': frequency
}
algorithms = {
'LinUCB': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta, noise_variance=a_noise:
        ContextualLinearBandit(reg_factor=la,
                               delta=delta,
                               nb_arms=nb_arms,
                               dimension=dimension,
                               noise_variance=noise_variance,
                               bound_features=bound_features,
                               bound_context=bound_context),
        #
        # 'LinTS': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
        # noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
        # dimension=dimension,
        # reg_factor=reg_factor,
        # delta=delta,
        # noise_variance=noise_variance),
        # 'Exp4': lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
        # noise_variance=a_noise: Exp4(nb_arms=nb_arms,
        # dimension=dimension,
        # experts=experts,
        # eta=eta,
        # gamma=0),
        # 'eps-greedy': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
        # noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
        # decrease_epsilon=True, reg_factor=reg_factor),
'LinTS': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
dimension=dimension,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise_variance),
'Exp4': lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: Exp4(nb_arms=nb_arms,
dimension=dimension,
experts=experts,
eta=eta,
gamma=0),
'eps-greedy': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
decrease_epsilon=True, reg_factor=reg_factor),
}
algorithms.update({
# 'LinUCB attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta, noise_variance=a_noise:
# ContextualLinearBandit(reg_factor=la,
# delta=delta,
# nb_arms=nb_arms,
# dimension=dimension,
# noise_variance=noise_variance,
# bound_features=bound_features,
# bound_context=bound_context),
#
# 'LinTS attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
# dimension=dimension,
# reg_factor=reg_factor,
# delta=delta,
# noise_variance=noise_variance),
# 'Exp4 attacked stationary': lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: Exp4(nb_arms=nb_arms,
# dimension=dimension,
# experts=experts,
# eta=eta,
# gamma=0),
#
# 'eps-greedy attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
# decrease_epsilon=True, reg_factor=reg_factor),
'LinUCB attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta, noise_variance=a_noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
'LinTS attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
dimension=dimension,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise_variance),
'Exp4 attacked stationary': lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: Exp4(nb_arms=nb_arms,
dimension=dimension,
experts=experts,
eta=eta,
gamma=0),
'eps-greedy attacked stationary': lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
decrease_epsilon=True, reg_factor=reg_factor),
})
for eps in attack_parameter_to_test:
algorithms.update({
# 'LinUCB attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context,
# reg_factor=la, delta=delta, noise_variance=a_noise:
# ContextualLinearBandit(reg_factor=la,
# delta=delta,
# nb_arms=nb_arms,
# dimension=dimension,
# noise_variance=noise_variance,
# bound_features=bound_features,
# bound_context=bound_context),
# 'LinTS attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
# dimension=dimension,
# reg_factor=reg_factor,
# delta=delta,
# noise_variance=noise_variance),
# 'Exp4 attacked gamma {}'.format(eps): lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: Exp4(nb_arms=nb_arms,
# dimension=dimension,
# experts=experts,
# eta=eta,
# gamma=0),
#
# 'eps-greedy attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
# noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
# decrease_epsilon=True, reg_factor=reg_factor)
'LinUCB attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context,
reg_factor=la, delta=delta, noise_variance=a_noise:
ContextualLinearBandit(reg_factor=la,
delta=delta,
nb_arms=nb_arms,
dimension=dimension,
noise_variance=noise_variance,
bound_features=bound_features,
bound_context=bound_context),
'LinTS attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: ContextualLinearTS(nb_arms=nb_arms,
dimension=dimension,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise_variance/5),
'Exp4 attacked gamma {}'.format(eps): lambda nb_arms, dimension, experts, eta, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: Exp4(nb_arms=nb_arms,
dimension=dimension,
experts=experts,
eta=eta,
gamma=0),
'eps-greedy attacked gamma {}'.format(eps): lambda nb_arms, dimension, bound_features, bound_context, reg_factor=la, delta=delta,
noise_variance=a_noise: contextEpsGREEDY(number_arms=nb_arms, dimension=dimension,
decrease_epsilon=True, reg_factor=reg_factor)
})
print(algorithms)
# results, pickle_name, id = run_and_output(dataset=None)
# results, pickle_name, id = run_and_output(dataset='jester')
results, pickle_name, id = run_and_output(dataset='movielens')
else:
for m in tqdm(range(nb_models)):
ret = work(m, K, n_features, a_noise, nb_simu, T, algorithms, random_state + m, M=M, dataset=true_data, which=which)
results.append(ret)
# id = '{:%Y%m%d_%H%M%S}'.format(datetime.datetime.now())
# pickle_name = "{}_{}_contextual_attacks_rewards.pickle".format(id, "PAR" if PARALLEL else "SEQ")
# print(pickle_name)
# with open(pickle_name, "wb") as f:
# pickle.dump(results, f)
# with open("{}_{}_contextual_attacks_rewards_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
# json.dump(settings, f)
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
for alg_name, res in results[0][1]:
    algorithms[alg_name] = {'regret': np.zeros((nb_models, nb_simu, T // frequency)),
                            'cost': np.zeros((nb_models, nb_simu, T // frequency))}
for m in range(nb_models):
res = results[m][1]
for i, val in enumerate(res):
alg_name = val[0]
val = val[1]
algorithms[alg_name]['regret'][m, :, :] = val['regret']
algorithms[alg_name]['cost'][m, :, :] = val['attack_cond']
plt.figure(1, figsize=(8, 8))
t = np.linspace(0, T-1, T, dtype='int')
for alg_name, res in algorithms.items():
res['regret'] = res['regret'].cumsum(axis=2)
mean_regret = np.mean(res['regret'], axis=(0, 1))
low_quantile = np.quantile(res['regret'], 0.1, axis=(0, 1))
high_quantile = np.quantile(res['regret'], 1 - 0.1, axis=(0, 1))
plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.title('Cumulative regret')
plt.legend()
plt.show()
# n = 9 # Number of colors
# new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
# linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
# plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
# plt.rc('lines', linewidth=2)
# for alg_name, res in results[0][1]:
# algorithms[alg_name] = {'regret': np.zeros((nb_models, nb_simu, T)),
# 'cost': np.zeros((nb_models, nb_simu, T))}
# for m in range(nb_models):
# res = results[m][1]
# for i, val in enumerate(res):
# alg_name = val[0]
# val = val[1]
# algorithms[alg_name]['regret'][m, :, :] = val['regret']
# algorithms[alg_name]['cost'][m, :, :] = val['attack_cond']
# plt.figure(1, figsize=(8, 8))
# t = np.linspace(0, T-1, T, dtype='int')
# for alg_name, res in algorithms.items():
# res['regret'] = res['regret'].cumsum(axis=2)
# mean_regret = np.mean(res['regret'], axis=(0, 1))
# low_quantile = np.quantile(res['regret'], 0.1, axis=(0, 1))
# high_quantile = np.quantile(res['regret'], 1 - 0.1, axis=(0, 1))
# plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.title('Cumulative regret')
#
# plt.legend()
#
# plt.figure(2, figsize=(8,8))
# t = np.linspace(0, T-1, T, dtype='int')
# for alg_name, res in algorithms.items():
# res['cost'] = res['cost'].cumsum(axis=2)
# mean_regret = np.mean(res['cost'], axis=(0, 1))
# low_quantile = np.quantile(res['cost'], 0.1, axis=(0, 1))
# high_quantile = np.quantile(res['cost'], 1 - 0.1, axis=(0, 1))
# plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.title('Total cost')
#
# plt.legend()
#
# plt.show()
# for res in results:
# alg_name, val = res[1][0], res[1][1]
# print(alg_name)
# mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
# t = np.linspace(0, T, T, dtype='int')
# low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
# high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
#
# plt.figure(0)
# plt.title('Cumulative Regret')
# plt.plot(mean_regret, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
# low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
# high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
# plt.figure(1)
# plt.title('Draws target arm')
# plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(2)
# plt.title('Cumulative attack norm attacked reward')
# if 'Attacked' in alg_name:
# plt.plot(np.mean(np.cumsum(val.attack_cond, axis=1), axis=0), label=alg_name)
# low_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
#
# plt.figure(4)
# plt.title('Error true env and learned env')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# if 'weak' in alg_name:
# plt.figure(6)
# plt.title('Difference estimated reward random context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
# if not 'weak' in alg_name:
# plt.figure(7)
# plt.title('Error biased env and learned env')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[target_arm]*(1 - attack_parameter), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(8)
# plt.title('Number of pulls target arm attack context')
# plt.plot(t, np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
| ContextualBanditsAttacks-main | isoexp/devfair_reward_attack.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.contextual.contextual_models import RandomContextualLinearArms
from isoexp.contextual.contextual_linucb import *
from tqdm import trange
from collections import namedtuple
from cycler import cycler
import matplotlib.pyplot as plt
import cvxpy as cp
def compute_attack(model, action, context, a_star, x_star, slack=10**-3):
if action != a_star:# and np.linalg.norm(context - x_star) < 10**-5:
delta = np.maximum(2*np.dot(model.thetas[action], x_star)/np.dot(model.thetas[a_star], x_star), 1)
epsilon = (delta - 1)*context
return epsilon
else:
return np.zeros((model.n_features,))
n = 6 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, prod_scalar, context_norm')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
model = RandomContextualLinearArms(n_actions=3, n_features=10, noise=noise, random_state=seed, bound_context=1)
model.thetas = model.thetas
theta_bound = np.max(np.linalg.norm(model.thetas, 2, axis=(1)))
target_context = np.random.randint(low=0, high=len(model.context_lists))
other_context = np.random.randint(low=0, high=len(model.context_lists))
while other_context == target_context:
other_context = np.random.randint(low=0, high=len(model.context_lists))
x_star = model.context_lists[target_context]
means_x_star = np.dot(model.thetas, x_star)
#target_arm = np.random.randint(low=0, high=model.n_actions)
target_arm = np.argmin(means_x_star)
T = 1*10**4
nb_simu = 5
print('a star=', target_arm)
print('x_star', x_star)
print('means for context x_star:', np.dot(model.thetas, x_star))
algorithms = {
'LinUCB': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.99,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context),
'LinUCB-Attacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.99,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context)
}
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
nb_target_arms = np.zeros((nb_simu, T))
nb_attack_needed = np.zeros((nb_simu, T))
attack_condition = np.zeros((nb_simu, T))
draws = np.zeros((nb_simu, T))
context_norm = draws.copy()
epsilon_norm = np.zeros((nb_simu, T))
thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in range(T):
context = model.get_context()
a_t = alg.get_action(context)
old_context = context
if alg_name == 'LinUCB-Attacked':
epsilon = compute_attack(model, a_t, context, target_arm, x_star)
context = context + epsilon
                alg.iteration -= 1  # undo the iteration counted by the first get_action call before re-querying
a_t = alg.get_action(context)
epsilon_norm[k, t] = np.linalg.norm(epsilon)
thetas_alg[k, t] = alg.thetas_hat
for a in range(model.n_actions):
for i, x in enumerate(model.context_lists):
p = np.dot(model.thetas[a], x) - np.dot(alg.thetas_hat[a], x)
prod_scalar[k, t, a, i] = p
r_t = model.reward(old_context, a_t)
alg.update(context, a_t, r_t)
regret[k, t] = model.best_arm_reward(old_context) - np.dot(model.thetas[a_t], old_context)
context_norm[k, t] = np.linalg.norm(context)
draws[k, t] = a_t
results += [(alg_name, MABResults(regret=regret, attack_cond=attack_condition, target_draws=draws,
thetas=thetas_alg, prod_scalar=prod_scalar, context_norm=context_norm))]
print('Target arm =', target_arm)
print('draws = ', np.mean(np.cumsum(draws == target_arm, axis=1),axis=0))
for i, (alg_name, val) in enumerate(results):
mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
t = np.linspace(0, T, T, dtype='int')
low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
plt.figure(0)
plt.title('Regret Attacked context')
plt.plot(mean_regret, label=alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
# mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
# low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
# high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
# plt.figure(1)
# plt.title('Draws target arm')
# plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
if 'Attacked' in alg_name:
plt.figure(2)
plt.title('Cumulative attack norm attacked context')
plt.plot(np.mean(np.cumsum(epsilon_norm, axis=1), axis=0), label=alg_name)
low_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.1, axis=0)
high_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(4)
plt.title('Error true env and learned env attack context')
for a in range(model.n_actions):
error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(error, 0.1, axis=0)
high_quantile = np.quantile(error, 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(7)
plt.title('Error biased env and learned env attack context')
for a in range(model.n_actions):
error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a]/np.maximum(2*np.dot(model.thetas[a], x_star)/np.dot(model.thetas[target_arm], x_star), 1), axis=2)
plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(error, 0.1, axis=0)
high_quantile = np.quantile(error, 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(5)
plt.title('Difference estimated reward for target context {}'.format(target_context))
for a in range(model.n_actions):
plt.plot(t, np.mean(val.prod_scalar[:, :, a, target_context], axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.1, axis=0)
high_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(6)
plt.title('Difference estimated reward for a random non target context {}'.format(other_context))
for a in range(model.n_actions):
plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.figure(8)
plt.title('Number of pulls target arm attack context')
plt.plot(t, np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.1, axis=0)
high_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/main_attacked_context.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = '0.0.dev0'
| ContextualBanditsAttacks-main | isoexp/__init__.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt
from collections import namedtuple
from cycler import cycler
from scipy.linalg import sqrtm
from scipy.optimize import linprog, minimize
from tqdm import trange
from isoexp.contextual.contextual_models import AttackOneUserModel
from isoexp.contextual.contextual_linucb import ContextualLinearBandit
def in_hull(points, x):
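    """Return True if `x` lies in the convex hull of the rows of `points`.

    Feasibility LP: find lambda >= 0 (linprog's default bounds) with
    points.T @ lambda = x and sum(lambda) = 1; the hull contains x iff
    such a convex combination exists.
    """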
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T, np.ones((1, n_points))]
b = np.r_[x, np.ones(1)]
lp = linprog(c, A_eq=A, b_eq=b)
return lp.success
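# Convex relaxation of the context-poisoning problem: find the smallest perturbation
# delta (in squared Euclidean norm) of x_star such that, for every arm a != a_star, the
# optimistic (UCB) value of arm a at x_star + delta falls below the target arm's
# estimated value (the target's own confidence term is left commented out), so that
# LinUCB picks a_star.  Solved with cvxpy; on solver failure the attack falls back to a
# zero perturbation.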
def compute_relaxed_attack(alg, a_star, x_star, slack=10**-10):
d = model.n_features
delta = cp.Variable(d)
obj = cp.Minimize(cp.quad_form(delta, np.eye(d))/2)
theta_a_star = alg.thetas_hat[a_star]
betas = alg.alpha()
constraints = []
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
            temp_constraint = (theta - theta_a_star)@(x_star+delta) + betas[a]*cp.norm(P@(x_star+delta))
            # - betas[a_star] * (cp.norm(P_a_star @ x_star) + (alg.inv_design_matrices[a] @ x_star) @
            #   delta/cp.norm(P_a_star @ x_star))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
prob.solve(verbose=False, max_iters=1000, feastol=10**-8)
epsilon = delta.value
# print('epsilon =', epsilon)
# for a in range(len(alg.thetas_hat)):
# if a != a_star:
# theta_a_star = alg.thetas_hat[a_star]
# betas = alg.alpha()
# theta = alg.thetas_hat[a]
# P = sqrtm(alg.inv_design_matrices[a])
# P_a_star = sqrtm(alg.inv_design_matrices[a_star])
# print('The constraint for arm {}:'.format(a), np.dot(theta - theta_a_star, (x_star+epsilon)) + betas[a]*np.linalg.norm(P.dot((x_star+epsilon))) \
# - betas[a_star] * (np.linalg.norm(P_a_star.dot(x_star)) +
# np.dot((alg.inv_design_matrices[a].dot(x_star)), epsilon)/np.linalg.norm(P_a_star.dot(x_star))))
except:
print('Exception')
epsilon = np.zeros((d,))
# print('epsilon =', epsilon)
if epsilon is None:
return np.zeros((d,))
return epsilon
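# Non-relaxed version of the attack above: the same minimum-norm perturbation problem,
# but with the target arm's confidence width kept in the constraints
# (UCB_a(x_star + delta) <= UCB_a_star(x_star + delta) - slack for all a != a_star),
# solved as a generic nonlinear program with scipy's SLSQP starting from -x_star.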
def compute_attack(alg, a_star, x_star, slack=10 **-10):
d = model.n_features
func = lambda delta: np.linalg.norm(delta)/2
theta_a_star = alg.thetas_hat[a_star]
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
betas = alg.alpha()
constraints_list = []
for a in range(len(alg.thetas_hat)):
if a != a_star:
theta = alg.thetas_hat[a]
P = sqrtm(alg.inv_design_matrices[a])
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta)))
- beta_a_star * np.linalg.norm(P_a_star.dot((x_star + delta))) + slack)
temp_cons = {'type': 'ineq', 'fun': temp_constraint}
constraints_list.append(temp_cons)
cons = tuple(constraints_list)
res = minimize(func, -x_star, method='SLSQP', constraints=cons)
# print(res.message)
try:
epsilon = res.x
except:
epsilon = np.zeros((d,))
if epsilon is None:
return np.zeros((d,))
return epsilon
n = 10 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
# linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
# plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, design_matrix, context_norm, betas')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
#model = RandomContextualLinearArms(n_actions=4, n_features=2, noise=noise, random_state=seed, bound_context=1)
model = AttackOneUserModel(n_actions=25, n_features=20, noise=noise, random_state=seed, bound_context=1, distance=1/2)
model.add_target_arm()
model.thetas = model.thetas
theta_bound = np.max(np.linalg.norm(model.thetas, 2, axis=(1)))
target_context = np.random.randint(low=0, high=len(model.context_lists))
# other_context = np.random.randint(low=0, high=len(model.context_lists))
# while other_context == target_context:
# other_context = np.random.randint(low=0, high=len(model.context_lists))
x_star = model.context_lists[target_context]
means_x_star = np.dot(model.thetas, x_star)
#target_arm = np.random.randint(low=0, high=model.n_actions)
# target_arm = np.argmin(means_x_star)
target_arm = model.n_actions-1
T = int(1*10**5)
nb_simu = 1
mask = np.ones(model.n_actions, dtype='bool')  # boolean mask: True for every arm except the target
mask[target_arm] = 0
print('a star=', target_arm)
print('x_star', x_star)
print('means for context x_star:', np.dot(model.thetas, x_star))
print(in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])))
if in_hull(x=model.thetas[target_arm], points=np.array(model.thetas[mask])):
raise ValueError()
algorithms = {
'LinUCB': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.01,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context),
# 'LinUCB-Attacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
# reg_factor=0.1, delta=0.99,
# bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
# noise_variance=noise, bound_context=model.bound_context),
'LinUCB-RelaxedAttacked': ContextualLinearBandit(nb_arms=model.n_actions, dimension=model.n_features,
reg_factor=0.1, delta=0.01,
bound_features=np.max(np.linalg.norm(model.thetas, axis=1)),
noise_variance=noise, bound_context=model.bound_context)
}
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
nb_target_arms = np.zeros((nb_simu, T))
nb_attack_needed = np.zeros((nb_simu, T))
attack_condition = np.zeros((nb_simu, T))
    draws = [[] for _ in range(nb_simu)]  # independent lists ([[]] * nb_simu would alias one shared list)
context_norm = np.zeros((nb_simu, T))
epsilon_norm = np.zeros((nb_simu, T))
thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n_user))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in trange(T):
context = model.get_context()
old_context = context
if 'Attacked' in alg_name:
if np.linalg.norm(context - x_star) < 10**-10:
if 'Relaxed' in alg_name:
epsilon = compute_relaxed_attack(alg, target_arm, context, slack=10**-4)
else:
epsilon = compute_attack(alg, target_arm, context, slack=10 ** -3)
else:
epsilon = np.zeros((model.n_features,))
context = context + epsilon
epsilon_norm[k, t] = np.linalg.norm(epsilon)
a_t = alg.get_action(context)
if np.linalg.norm(x_star - old_context) < 10**-10:
draws[k].append(a_t)
thetas_alg[k, t] = alg.thetas_hat
# for a in range(model.n_actions):
# for i, x in enumerate(model.context_lists):
# p = np.dot(model.thetas[a], x) - np.dot(alg.thetas_hat[a], x)
# prod_scalar[k, t, a, i] = p
r_t = model.reward(old_context, a_t)
alg.update(context, a_t, r_t)
regret[k, t] = model.best_arm_reward(old_context) - np.dot(model.thetas[a_t], old_context)
context_norm[k, t] = np.linalg.norm(context)
draws[k] = np.array(draws[k])
draws = np.array(draws)
print(draws.shape)
print('Norm attacks=', epsilon_norm)
results += [(alg_name, MABResults(regret=regret, attack_cond=attack_condition, target_draws=draws,
thetas=thetas_alg, design_matrix=alg.inv_design_matrices, context_norm=context_norm, betas=alg.alpha()))]
print('Target arm =', target_arm)
print('draws = ', np.mean(np.cumsum(draws == target_arm, axis=1),axis=0))
for i, (alg_name, val) in enumerate(results):
mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
t = np.linspace(0, T, T, dtype='int')
low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
plt.figure(0)
plt.title('Regret Attacked context')
plt.plot(mean_regret, label=alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
plt.figure(1)
plt.title('Draws target arm')
plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
    ### TODO: change this plot to show the number of times the context has been presented on the x-axis
if 'Attacked' in alg_name:
plt.figure(2)
plt.title('Cumulative attack norm attacked context')
plt.plot(np.mean(np.cumsum(epsilon_norm, axis=1), axis=0), label=alg_name)
low_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.1, axis=0)
high_quantile = np.quantile(np.cumsum(epsilon_norm, axis=1), 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
if model.n_features == 2:
for i, (alg_name, val) in enumerate(results):
plt.figure(i + 3)
plt.title('Confidence ellipse for {}'.format(alg_name))
x = np.linspace(0, 2*np.pi)
x_1 = np.cos(x)
y_1 = np.sin(x)
X = np.vstack((x_1, y_1))
betas = val.betas
for a in range(model.n_actions):
center = val.thetas[-1, -1, a]
V = sqrtm(val.design_matrix[a])
y = center.reshape((2, 1)) + betas[a] * np.dot(V, X)
plt.plot(y[0, :], y[1, :], label = 'confidence ellipse arm {}'.format(a))
plt.fill_between(y[0,:], y[1,:], (center.reshape((2, 1))*np.ones((2, 50)))[1, :], alpha=0.15)
plt.scatter(model.thetas[a][0],model.thetas[a][1], c=new_colors[a])
plt.scatter(center[0], center[1], marker='^', c=new_colors[a])
plt.scatter(x_star[0], x_star[1], marker='+', c = new_colors[-1])
plt.legend()
plt.show()
#
# plt.figure(4)
# plt.title('Error true env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(7)
# plt.title('Error biased env and learned env attack context')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a]/np.maximum(2*np.dot(model.thetas[a], x_star)/np.dot(model.thetas[target_arm], x_star), 1), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(5)
# plt.title('Difference estimated reward for target context {}'.format(target_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, target_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, target_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(6)
# plt.title('Difference estimated reward for a random non target context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/main_attack_one_user.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path.append('/isoexp')
import numpy as np
import isoexp.mab.arms as arms
import pickle
from isoexp.mab.smab_algs import UCB1, EXP3_IX, attacked_UCB1, attacked_EXP3_IX, EXP3_P, attacked_EXP3_P, FTRL, attacked_FTRL
from matplotlib import rc
import json
import datetime
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
from cycler import cycler
n = 6 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, cum_rewards, attacks, times_of_attacks')
random_state = np.random.randint(0, 123123)
K = 5
MAB = []
means = np.random.uniform(low=0.25, high=0.75, size=K)
#means = np.array([0.47823152, 0.70243227, 0.64504063, 0.65679234, 0.49546542,
# 0.46417188, 0.64736977, 0.71255566, 0.66844984, 0.26030838])
for k in range(K) :
MAB.append(arms.ArmBeta(a=8*means[k], b=8*(1-means[k])))
#MAB.append(arms.ArmBernoulli(p=means[k]))
nb_arms = len(MAB)
print('means: {}'.format(means))
mu_max = np.max(means)
a_star = np.argmin(means)
T = 1*10**4 # horizon
nb_simu = 2
eta = np.sqrt(1/(K*T))
# eta = 0.01
gamma = eta/2
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
}
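# Bandit algorithms compared below: each entry maps a name to a factory (T, MAB) -> run.
# The 'Attacked' variants receive the worst arm a_star as target and additionally return
# the per-round attack values and the rounds in which an attack was performed.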
algorithms = {
'INF': lambda T, MAB: FTRL(T, MAB, eta=eta, alg='inf'),
'Attacked INF': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta, alg='inf'),
'FTRL log barrier' : lambda T, MAB: FTRL(T, MAB, eta=eta, alg='log_barrier'),
'Attacked FTRL log barrier': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta,
alg='log_barrier'),
'UCB': lambda T, MAB: UCB1(T, MAB, alpha=1),
'Attacked UCB': lambda T, MAB: attacked_UCB1(T, MAB, target_arm = a_star, alpha=1., delta=0.99),
# 'EXP3-IX': lambda T, MAB: EXP3_IX(T, MAB, eta=eta, gamma=gamma),
# 'Attacked EXP3-IX': lambda T, MAB: attacked_EXP3_IX(T, MAB, target_arm=a_star),
'EXP3': lambda T, MAB: EXP3_P(T, MAB, eta=np.sqrt(np.log(K)/(T*K))),
'Attacked EXP3': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star, eta=np.sqrt(np.log(K)/(T*K))),
# 'EXP3.P Gamma 0.1': lambda T, MAB: EXP3_P(T, MAB, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T))),
# 'Attacked EXP3.P Gamma 0.1': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T)))
}
results = []
full_algo = algorithms
for alg_name in full_algo.keys():
alg = full_algo[alg_name]
regret = np.zeros((nb_simu, T))
rwds = 0*regret
times = 0*regret
attacks = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
        try:
            # non-attacked algorithms return (rewards, draws)
            rewards, draws = alg(T, MAB)
        except ValueError:
            # attacked algorithms return (rewards, draws, attacks, attack_times)
            rewards, draws, att, times_of_att = alg(T, MAB)
attacks[k] = np.cumsum(att)
times[k] = times_of_att
rwds[k] = np.cumsum(means[draws.astype('int')])
regret[k] = max(means) * np.arange(1, T + 1) - rwds[k]
results += [(alg_name, MABResults(regret=regret, cum_rewards=rwds, attacks=attacks, times_of_attacks=times))]
# id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
# with open("{}_{}_MAB_illustration.pickle".format(id, "SEQ"), "wb") as f:
# pickle.dump(results, f)
# with open("{}_{}_MAB_illustration_settings.json".format(id, "SEQ"), "w+") as f:
# json.dump(settings, f)
t = np.arange(0, T)
for alg_name, val in results:
mean_regret = np.mean(val.regret, axis=0)
low_quantile_regret = np.quantile(val.regret, 0.25, axis=0)
high_quantile_regret = np.quantile(val.regret, 0.75, axis=0)
rwds = np.mean(val.cum_rewards, axis=0)
low_quantile_rwds = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_rwds = np.quantile(val.cum_rewards, 0.75, axis=0)
# plt.figure(1)
# plt.title('Rewards')
# plt.plot(rwds, label=alg_name)
# plt.legend()
# plt.fill_between(t, low_quantile_rwds, high_quantile_rwds, alpha=0.15)
plt.figure(2)
plt.title('Regret Adv alg')
plt.plot(mean_regret, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_regret, high_quantile_regret, alpha=0.15)
if 'Attacked' in alg_name:
plt.figure(3)
cum_sum_attacks = np.mean(np.abs(val.attacks), axis=0)
low_quantile_attacks = np.quantile(np.abs(val.attacks), 0.25, axis=0)
high_quantile_attacks = np.quantile(np.abs(val.attacks), 0.75, axis=0)
plt.title('Cumulative sum of attacks Adv alg')
plt.plot(cum_sum_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_attacks, high_quantile_attacks, alpha=0.15)
# plt.figure(2)
# rep = np.random.randint(low=0, high=nb_simu)
# times_to_consider = val.times_of_attacks[rep]
# plt.scatter(t[times_to_consider == 1], val.regret[rep, times_to_consider == 1])
plt.figure(4)
plt.title('Number of attacks Adv alg')
        number_of_attacks = np.mean(np.cumsum(val.times_of_attacks, axis=1), axis=0) / (t + 1)  # fraction of rounds attacked; t + 1 avoids division by zero at t = 0
        high_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.75, axis=0) / (t + 1)
        low_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.25, axis=0) / (t + 1)
plt.plot(number_of_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.show()
#import tikzplotlib
#tikzplotlib.save("lcb_worst.tex")
| ContextualBanditsAttacks-main | isoexp/main_mab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import shutil
import sys
from cycler import cycler
import tarfile
import json
import re
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
def get_eps(name):
    temp_eps = re.findall(r'[\d\.]+', name)
temp_eps = np.array(list(map(float, temp_eps)))
temp_eps = temp_eps[temp_eps <= 1]
temp_eps = temp_eps[0]
return temp_eps
def get_name(name):
first, rest = name.split(' ', 1)
return first
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = '20200121_153844_PAR_contextual_attacks_rewards.pickle'
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Opening file %s..." % filename)
setting_name = filename[:-7] + '_settings.json'
print('Opening settings %s...' % setting_name)
with open(setting_name, 'r') as f:
settings = json.load(f)
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
print("Done.\n")
with open(os.path.join(folder, setting_name), 'w') as f:
json.dump(settings, f)
EVERY = 500
LW = 2
LATEX = True
nb_models = settings["nb_models"]
nb_simu = settings["nb_simu"]
real_T = settings["T"]
frequency = settings['frequency']
T = real_T // frequency
attack_parameter = settings["epsilon_tested"]
eps_plot_regret = attack_parameter[np.random.randint(low=0, high=len(attack_parameter))]
print("Generating regret and cost figures ...")
# select "bad" model
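# Aggregate the pickled runs into three dictionaries:
#   algorithms          -> un-attacked baselines,
#   attacked_algorithms -> attacked runs keyed by (algorithm name, epsilon),
#   stationary_alg      -> runs attacked with the stationary strategy,
# each holding (nb_models, nb_simu, T) arrays for regret, attack cost, target draws and reward range.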
algorithms = {}
attacked_algorithms = {}
stationary_alg = {}
for alg_name, res in results[0][1]:
if not 'attacked' in alg_name:
algorithms[alg_name] = {'regret': np.zeros((nb_models, nb_simu, T)),
'cost': np.zeros((nb_models, nb_simu, T)),
'target_draws': np.zeros((nb_models, nb_simu, T)),
'rewards_range': np.zeros((nb_models, nb_simu, T))}
elif 'gamma' in alg_name:
eps = get_eps(alg_name)
shortened_alg_name = get_name(alg_name)
attacked_algorithms[(shortened_alg_name, eps)] = {'regret': np.zeros((nb_models, nb_simu, T)),
'cost': np.zeros((nb_models, nb_simu, T)),
'target_draws': np.zeros((nb_models, nb_simu, T)),
'rewards_range': np.zeros((nb_models, nb_simu, T))}
        else:
            shortened_alg_name = get_name(alg_name)
stationary_alg[shortened_alg_name] = {'regret': np.zeros((nb_models, nb_simu, T)),
'cost': np.zeros((nb_models, nb_simu, T)),
'target_draws': np.zeros((nb_models, nb_simu, T)),
'rewards_range': np.zeros((nb_models, nb_simu, T))}
for m in range(nb_models):
res = results[m][1]
for i, val in enumerate(res):
alg_name = val[0]
val = val[1]
print(val['regret'])
if not 'attacked' in alg_name:
algorithms[alg_name]['regret'][m, :, :] = val['regret']
algorithms[alg_name]['cost'][m, :, :] = val['attack_cond']
algorithms[alg_name]['target_draws'][m, :, :] = val['target_draws']
algorithms[alg_name]['rewards_range'][m, :, :] = val['range_rewards']
elif 'gamma' in alg_name:
eps = get_eps(alg_name)
shortened_alg_name = get_name(alg_name)
attacked_algorithms[(shortened_alg_name, eps)]['regret'][m, :, :] = val['regret']
attacked_algorithms[(shortened_alg_name, eps)]['cost'][m, :, :] = val['attack_cond']
attacked_algorithms[(shortened_alg_name, eps)]['target_draws'][m, :, :] = val['target_draws']
attacked_algorithms[(shortened_alg_name, eps)]['rewards_range'][m, :, :] = val['range_rewards']
else:
shortened_alg_name = get_name(alg_name)
stationary_alg[shortened_alg_name]['regret'][m, :, :] = val['regret']
stationary_alg[shortened_alg_name]['cost'][m, :, :] = val['attack_cond']
stationary_alg[shortened_alg_name]['target_draws'][m, :, :] = val['target_draws']
stationary_alg[shortened_alg_name]['rewards_range'][m, :, :] = val['range_rewards']
plt.figure(1)
t = np.linspace(0, T - 1, T, dtype='int') * frequency
rep = nb_models * nb_simu
for alg_name, res in algorithms.items():
#Plot the regret ofr the normal alg
res['regret'] = res['regret'].cumsum(axis=2)
mean_regret = np.mean(res['regret'], axis=(0, 1))
std = np.std(res['regret'], axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
#Plot the regret for the attacked algorithms
regret = attacked_algorithms[(alg_name, eps_plot_regret)]['regret'].cumsum(axis=2)
mean_regret = np.mean(regret, axis=(0, 1))
std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
regret = stationary_alg[alg_name]['regret'].cumsum(axis=2)
mean_regret = np.mean(regret, axis=(0, 1))
std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
    plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked stationary')
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
#Plot the regret for the attacked algorithms
# regret = attacked_algorithms[(alg_name, eps_plot_regret)]['regret'].cumsum(axis=2)
# mean_regret = np.mean(regret, axis=(0, 1))
# std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
# plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
# plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
#
# regret = stationary_alg[alg_name]['regret'].cumsum(axis=2)
# mean_regret = np.mean(regret, axis=(0, 1))
# std = np.std(regret, axis=(0, 1))/np.sqrt(rep)
# plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name + ' attacked statinary')
# plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
plt.title('Cumulative regret')
plt.legend()
plt.savefig(os.path.join(folder, "avg_regret.png"))
if LATEX:
tikzplotlib.save(os.path.join(folder, "avg_regret.tex"))
plt.figure(2)
for alg_name, res in algorithms.items():
# #Plot the regret ofr the normal alg
# res['cost'] = res['cost'].cumsum(axis=2)
# mean_cost = np.mean(res['cost'], axis=(0, 1))
# std = np.std(res['cost'], axis=(0, 1))/np.sqrt(rep)
# plt.plot(t[::EVERY], mean_cost[::EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[::EVERY], mean_cost[::EVERY] - 2 * std[::EVERY], mean_cost[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
#Plot the regret for the attacked algorithms
cost = attacked_algorithms[(alg_name, eps_plot_regret)]['cost'].cumsum(axis=2)
mean_cost = np.mean(cost, axis=(0, 1))
std = np.std(cost, axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_cost[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
plt.fill_between(t[::EVERY], mean_cost[::EVERY] - 2 * std[::EVERY], mean_cost[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
cost = stationary_alg[alg_name]['cost'].cumsum(axis=2)
mean_cost = np.mean(cost, axis=(0, 1))
std = np.std(cost, axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_cost[::EVERY], linewidth=LW, label=alg_name + ' attacked stationary')
plt.fill_between(t[::EVERY], mean_cost[::EVERY] - 2 * std[::EVERY], mean_cost[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
plt.title('Total attack cost')
plt.legend()
plt.savefig(os.path.join(folder, "avg_cost.png"))
if LATEX:
tikzplotlib.save(os.path.join(folder, "avg_cost.tex"))
plt.figure(3)
for alg_name, res in algorithms.items():
# #Plot the regret ofr the normal alg
# res['target_draws'] = res['target_draws'].cumsum(axis=2)
# mean_draws = np.mean(res['target_draws'], axis=(0, 1))
# std = np.std(res['target_draws'], axis=(0, 1))/np.sqrt(rep)
# plt.plot(t[::EVERY], mean_draws[::EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[::EVERY], mean_draws[::EVERY] - 2 * std[::EVERY], mean_draws[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
draws = attacked_algorithms[(alg_name, eps_plot_regret)]['target_draws'].cumsum(axis=2)
mean_draws = np.mean(draws, axis=(0, 1))
std = np.std(draws, axis=(0, 1))/np.sqrt(rep)
plt.plot(t[::EVERY], mean_draws[::EVERY], linewidth=LW, label=alg_name + ' attacked eps {:.2f}'.format(eps_plot_regret))
plt.fill_between(t[::EVERY], mean_draws[::EVERY] - 2 * std[::EVERY], mean_draws[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
draws = stationary_alg[alg_name]['target_draws'].cumsum(axis=2)
mean_draws = np.mean(draws, axis=(0, 1))
std = np.std(draws, axis=(0, 1))/np.sqrt(rep)
    plt.plot(t[::EVERY], mean_draws[::EVERY], linewidth=LW, label=alg_name + ' attacked stationary')
plt.fill_between(t[::EVERY], mean_draws[::EVERY] - 2 * std[::EVERY], mean_draws[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
plt.title('Total target arm draws')
plt.legend()
plt.savefig(os.path.join(folder, "avg_draws.png"))
if LATEX:
tikzplotlib.save(os.path.join(folder, "avg_draws.tex"))
print("Generating impact of epsilon figure")
regrets_mean = {}
costs_mean = {}
draws_mean = {}
regrets_std = {}
costs_std = {}
draws_std = {}
for alg_name in algorithms.keys():
list_r_mean = []
list_c_mean = []
list_d_mean = []
list_r_std = []
list_c_std = []
list_d_std = []
for eps in attack_parameter:
r = attacked_algorithms[(alg_name, eps)]['regret'].cumsum(axis=2)[:, :, -1]
std_r = np.std(r)/np.sqrt(rep)
mean_r = np.mean(r)
c = attacked_algorithms[(alg_name, eps)]['cost'].cumsum(axis=2)[:, :, -1]
std_c = np.std(c)/np.sqrt(rep)
mean_c = np.mean(c)
d = attacked_algorithms[(alg_name, eps)]['target_draws'].cumsum(axis=2)[:, :, -1]
std_d = np.std(d)/np.sqrt(rep)
mean_d = np.mean(d)
list_r_mean.append(mean_r)
list_c_mean.append(mean_c)
list_d_mean.append(mean_d)
list_r_std.append(std_r)
list_c_std.append(std_c)
list_d_std.append(std_d)
regrets_mean[alg_name] = np.array(list_r_mean)
costs_mean[alg_name] = np.array(list_c_mean)
draws_mean[alg_name] = np.array(list_d_mean)
regrets_std[alg_name] = np.array(list_r_std)
costs_std[alg_name] = np.array(list_c_std)
draws_std[alg_name] = np.array(list_d_std)
plt.figure(4)
plt.title('Cost as a function of attack parameter at T={}'.format(T))
for alg_name in algorithms.keys():
c = costs_mean[alg_name]
std = costs_std[alg_name]
plt.plot(attack_parameter, c, linewidth=LW, label=alg_name)
plt.fill_between(attack_parameter, c - 2 * std, c + 2 * std, alpha=0.15)
plt.legend()
plt.savefig(os.path.join(folder, "cost_epsilon.png"))
plt.figure(5)
plt.title('Regret as a function of attack parameter at T={}'.format(T))
for alg_name in algorithms.keys():
r = regrets_mean[alg_name]
std = regrets_std[alg_name]
plt.plot(attack_parameter, r, linewidth=LW, label=alg_name)
plt.fill_between(attack_parameter, r - 2 * std, r + 2 * std, alpha=0.15)
plt.legend()
plt.savefig(os.path.join(folder, "regret_epsilon.png"))
plt.figure(6)
plt.title('Target draws as a function of attack parameter at T={}'.format(T))
for alg_name in algorithms.keys():
d = draws_mean[alg_name]
std = draws_std[alg_name]
plt.plot(attack_parameter, d, linewidth=LW, label=alg_name)
plt.fill_between(attack_parameter, d - 2 * std, d + 2 * std, alpha=0.15)
plt.legend()
plt.savefig(os.path.join(folder, "draws_epsilon.png"))
for eps in attack_parameter:
rewards = np.array([])
for alg_name in algorithms.keys():
rewards = np.concatenate((rewards, attacked_algorithms[(alg_name, eps)]['rewards_range']), axis=None)
print('-'*100)
print('The maximum reward for epsilon = {:.2f} is:'.format(eps), np.max(rewards))
print('The minimum reward for epsilon = {:.2f} is:'.format(eps), np.min(rewards))
print('The mean reward for epsilon = {:.2f} is:'.format(eps), np.mean(rewards))
print('The median reward for epsilon = {:.2f} is:'.format(eps), np.median(rewards))
print('The 25% quantile reward for epsilon = {:.2f} is:'.format(eps), np.quantile(rewards, 0.25))
print('The 75% quantile reward for epsilon = {:.2f} is:'.format(eps), np.quantile(rewards, 0.75))
    print('The percentage of rewards over 1 for epsilon = {:.2f} is:'.format(eps), np.sum(rewards > 1)/len(rewards))
    print('The percentage of rewards below 0 for epsilon = {:.2f} is:'.format(eps), np.sum(rewards < 0) / len(rewards))
| ContextualBanditsAttacks-main | isoexp/parse_reward_attack.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.contextual.contextual_models import RandomContextualLinearArms
from isoexp.contextual.contextual_linucb import *
from tqdm import trange
from collections import namedtuple
from cycler import cycler
import matplotlib.pyplot as plt
import cvxpy as cp
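# Experts fed to Exp4: each expert maps a context to a probability vector over arms.
# 'random' plays uniformly, 'optimal' puts all mass on the best arm of the true model,
# and any other type always recommends the fixed arm a_star.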
class exp(object):
def __init__(self, nb_arms, type='random', a_star = 0):
self.K = nb_arms
self.type= type
self.a_star = a_star
def get_action(self, context):
if self.type == 'random':
return np.ones((self.K,))/self.K
elif self.type == 'optimal':
means = np.dot(model.thetas, context)
a = np.argmax(means)
proba = np.zeros((self.K,))
proba[a] = 1
return proba
else:
proba = np.zeros((self.K,))
proba[self.a_star] = 1
return proba
n = 6 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
MABResults = namedtuple('MABResults', 'regret, attack_cond, target_draws, thetas, prod_scalar')
seed = np.random.randint(0, 10 ** 5)
print('seed = ', seed)
noise = 0.1
attack_parameter = 1/2
model = RandomContextualLinearArms(n_actions=5, n_features=10, noise=noise, random_state=seed, bound_context=1)
theta_bound = np.max(np.linalg.norm(model.thetas, 2, axis=(1)))
# target_context = np.random.randint(low=0, high=len(model.context_lists))
# other_context = np.random.randint(low=0, high=len(model.context_lists))
# while other_context == target_context:
# other_context = np.random.randint(low=0, high=len(model.context_lists))
# target_arm = np.random.randint(low=0, high=model.n_actions)
target_arm = np.argmax(np.dot(model.thetas, model.context_lists[-1]))
T = 5000
nb_simu = 30
M = 10
print('a_star=', target_arm)
eta = np.sqrt(2*np.log(M)/(T*model.n_actions))
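# Expert pool for Exp4: M - 2 uniformly random experts, one oracle expert that always
# plays the best arm of the true model, and one expert that always recommends the target arm.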
experts = []
for i in range(M-2):
experts.append(exp(nb_arms=model.n_actions, type='random'))
experts.append(exp(nb_arms=model.n_actions, type='optimal'))
experts.append(exp(nb_arms=model.n_actions, type='', a_star=int(target_arm)))
algorithms = {
'Exp4': Exp4(nb_arms=model.n_actions, dimension=model.n_features, experts=experts, eta=eta, gamma=10**-3)
}
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
draws = np.zeros((nb_simu, T))
epsilon_norm = np.zeros((nb_simu, T))
thetas_alg = np.zeros((nb_simu, T, model.n_actions, model.n_features))
prod_scalar = np.zeros((nb_simu, T, model.n_actions, model.n))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in range(T):
context = model.get_context()
a_t = alg.get_action(context)
r_t = model.reward(context, a_t)
attack_t = 0
epsilon_norm[k, t] = np.abs(attack_t)
alg.update(context, a_t, r_t + attack_t)
# try:
# thetas_alg[k, t] = alg.thetas_hat
# except:
# pass
# for a in range(model.n_actions):
# for i, x in enumerate(model.context_lists):
# p = np.dot(alg.thetas_hat[a], x) - (1 - attack_parameter)*np.dot(model.thetas[target_arm], x)
# prod_scalar[k, t, a, i] = p
regret[k, t] = model.best_arm_reward(context) - np.dot(model.thetas[a_t], context)
draws[k, t] = a_t
results += [(alg_name, MABResults(regret=regret, attack_cond=epsilon_norm, target_draws=draws,
thetas=thetas_alg, prod_scalar=prod_scalar))]
print('Target arm =', target_arm)
print('draws = ', np.mean(np.cumsum(draws == target_arm, axis=1),axis=0))
for i,(alg_name, val) in enumerate(results):
mean_regret = np.mean(val.regret.cumsum(axis=1), axis=0)
t = np.linspace(0, T, T, dtype='int')
low_quantile = np.quantile(val.regret.cumsum(axis=1), 0.1, axis=0)
high_quantile = np.quantile(val.regret.cumsum(axis=1), 0.9, axis=0)
plt.figure(0)
plt.title('Cumulative Regret')
plt.plot(mean_regret, label=alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
# mean_condition = np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0)
# low_quantile = np.quantile(val.attack_cond, 0.1, axis=0)
# high_quantile = np.quantile(val.attack_cond, 0.9, axis=0)
# plt.figure(1)
# plt.title('Draws target arm')
# plt.plot(mean_condition, label=alg_name)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(2)
# plt.title('Cumulative attack norm attacked reward')
# if 'Attacked' in alg_name:
# plt.plot(np.mean(np.cumsum(val.attack_cond, axis=1), axis=0), label=alg_name)
# low_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.attack_cond, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
#
plt.figure(4)
plt.title('Error true env and learned env')
for a in range(model.n_actions):
error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[a], axis=2)
plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
low_quantile = np.quantile(error, 0.1, axis=0)
high_quantile = np.quantile(error, 0.9, axis=0)
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.legend()
# if 'weak' in alg_name:
# plt.figure(6)
# plt.title('Difference estimated reward random context {}'.format(other_context))
# for a in range(model.n_actions):
# plt.plot(t, np.mean(val.prod_scalar[:, :, a, other_context], axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.1, axis=0)
# high_quantile = np.quantile(val.prod_scalar[:, :, a, other_context], 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
#
# if not 'weak' in alg_name:
# plt.figure(7)
# plt.title('Error biased env and learned env')
# for a in range(model.n_actions):
# error = np.linalg.norm(val.thetas[:, :, a] - model.thetas[target_arm]*(1 - attack_parameter), axis=2)
# plt.plot(t, np.mean(error, axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(error, 0.1, axis=0)
# high_quantile = np.quantile(error, 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
# plt.figure(8)
# plt.title('Number of pulls target arm attack context')
# plt.plot(t, np.mean(np.cumsum(val.target_draws == target_arm, axis=1), axis=0), label=alg_name + ' arm {}'.format(a))
# low_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.1, axis=0)
# high_quantile = np.quantile(np.cumsum(val.target_draws == target_arm, axis=1), 0.9, axis=0)
# plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
# plt.legend()
plt.show()
#print('Target arms=', np.mean(np.cumsum(nb_target_arms,axis=1)))
#print('Attack needed arms=', np.mean(np.cumsum(nb_attack_needed,axis=1))) | ContextualBanditsAttacks-main | isoexp/main_attack_reward.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = '0.0.dev0' | ContextualBanditsAttacks-main | isoexp/contextual/__init__.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import cvxpy as cp
from scipy.optimize import minimize
class RandomArm(object):
def __init__(self, initial_arms):
self.arms = initial_arms
def get_action(self):
return np.random.choice(self.arms)
def update(self, a_t, r_t):
pass
def reset(self):
pass
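# Disjoint LinUCB: one ridge-regression estimate theta_hat_a per arm, maintained through
# the inverse design matrix A_a^{-1} and the vector b_a = sum_t r_t x_t.  Arm selection is
# optimistic: argmax_a  x^T theta_hat_a + alpha_a(t) * sqrt(x^T A_a^{-1} x), where alpha_a
# follows the usual self-normalized confidence-bound radius unless a fixed exploration
# coefficient is supplied.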
class ContextualLinearBandit(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.99,
bound_features=None, noise_variance=None, bound_context=None, alpha=None):
self.K = nb_arms
self.dim = dimension
self.reg_factor = reg_factor
self.delta = delta
self.exploration_coeff = alpha
self.iteration = None
self.bound_context = bound_context
self.bound_features = bound_features
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
# self.range = 1
# self.est_bound_theta = 0
# self.est_bound_features = 0
self.n_samples = np.zeros((self.K,))
self.iteration = 0
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
# def auto_alpha(self):
# d = self.n_features
# sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
# return sigma * np.sqrt(d * np.log((1 + max(1, self.iteration - 1) * D * D / self.reg_factor) / self.delta)) \
# + np.sqrt(self.reg_factor) * B
def alpha(self):
d = self.dim
# print(d)
sigma, B, D = self.noise_variance, self.bound_context, self.bound_features
if self.exploration_coeff is None:
return sigma * np.sqrt(d * np.log((1 + np.maximum(1, self.n_samples) * B * B / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * D
else:
return self.exploration_coeff
def get_action(self, context):
self.iteration += 1
        # break ties randomly by adding a tiny amount of noise to the indices
noise = np.random.random(self.K) * 0.000001
estimate = np.zeros((self.K,))
sfactor = self.alpha()
for arm in range(self.K):
Ainv = self.inv_design_matrices[arm]
# print(Ainv)
b = self.bs[arm]
theta_hat = np.dot(Ainv, b)
self.thetas_hat[arm] = theta_hat
ta = np.dot(context, np.dot(Ainv, context))
sfactor = self.alpha()
# print('sfactor =', sfactor)
# print('context = ', context)
# print('theta_hat=', theta_hat)
# print('ta = ', ta)
estimate[arm] = np.dot(context, theta_hat) + sfactor[arm] * np.sqrt(ta)
ucb = estimate + noise
choice = np.argmax(ucb) # choose the highest
return choice
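    # Rank-one update of the per-arm statistics: the inverse design matrix is refreshed
    # with the Sherman-Morrison formula
    #   (A + x x^T)^{-1} = A^{-1} - (A^{-1} x x^T A^{-1}) / (1 + x^T A^{-1} x),
    # so no explicit matrix inversion is needed at run time.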
def update(self, context, a_t, r_t):
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
self.thetas_hat[a_t] = np.dot(self.inv_design_matrices[a_t], self.bs[a_t])
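# Linear Thompson Sampling variant: same per-arm ridge statistics as LinUCB, but the
# score of each arm is sampled as N(x^T theta_hat_a, nu^2 x^T A_a^{-1} x) with
# nu = sigma * sqrt(d * log(t / delta) / 2); `deterministic=True` drops the noise.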
class ContextualLinearTS(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.99, noise_variance=None):
self.K = nb_arms
self.dim = dimension
self.delta = delta
self.reg_factor = reg_factor
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
self.n_samples = np.zeros((self.K,))
self.iteration = 0
self.thetas = self.thetas_hat
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
def get_action(self, context, deterministic=False):
self.iteration += 1
estimate = np.zeros((self.K,))
nu = self.noise_variance*np.sqrt(self.dim*np.log(self.iteration/self.delta)/2)
for arm in range(self.K):
Ainv = self.inv_design_matrices[arm]
b = self.bs[arm]
theta_hat = np.dot(Ainv, b)
self.thetas_hat[arm] = theta_hat
mean = np.dot(self.thetas_hat[arm], context)
variance = nu**2 * np.dot(context, np.dot(Ainv, context))
estimate[arm] = mean + np.sqrt(variance) * (0 if deterministic else np.random.randn())
ucb = estimate
choice = np.argmax(ucb) # choose the highest
return choice
def update(self, context, a_t, r_t):
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
class contextEpsGREEDY():
"""
Args:
T (int): horizon
arms (list): list of available arms
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
def __init__(self, number_arms, dimension, epsilon=0.1, reg_factor=0.1, decrease_epsilon=False):
self.decrease_epsilon = decrease_epsilon
self.epsilon = epsilon
self.K = number_arms
self.dim = dimension
self.rewards = []
self.draws = []
self.reg_factor = reg_factor
self.n_samples = np.ones((self.K,)) # number of observations of each arm
self.sum_rewards = np.zeros((self.K,)) # sum of rewards for each arm
self.thetas_hat = np.zeros((self.K, self.dim))
self.inv_design_matrices = [np.identity(self.dim)/self.reg_factor for _ in range(number_arms)]
self.bs = np.zeros((self.K, self.dim))
self.nb_iter = 0
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
self.n_samples = np.zeros((self.K,))
self.nb_iter = 0
def estimated_best_arm(self, context):
return np.argmax(self.thetas_hat.dot(context))
def get_action(self, context):
if self.nb_iter < self.K:
return self.nb_iter
else:
# select the chosen_arm
expected_rewards = self.thetas_hat.dot(context)
rnd = np.random.rand()
if rnd <= self.epsilon / (np.sqrt(self.nb_iter + 1) if self.decrease_epsilon else 1):
chosen_arm = np.random.choice(self.K)
else:
noise = 10**-7*np.random.randn(self.K)
chosen_arm = np.argmax(noise + expected_rewards)
return chosen_arm
def update(self, context, chosen_arm, reward):
# update quantities
self.nb_iter += 1
self.rewards.append(reward)
self.draws.append(chosen_arm)
self.sum_rewards[chosen_arm] += reward
self.n_samples[chosen_arm] += 1
self.inv_design_matrices[chosen_arm] = self.inv_design_matrices[chosen_arm] - np.dot(self.inv_design_matrices[chosen_arm], np.dot(np.outer(context, context),
self.inv_design_matrices[chosen_arm])) \
/ (1. + np.dot(context, np.dot(self.inv_design_matrices[chosen_arm], context)))
self.bs[chosen_arm] += reward * context
self.thetas_hat[chosen_arm] = self.inv_design_matrices[chosen_arm].dot(self.bs[chosen_arm])
return self.rewards, self.draws
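# Reward-poisoning attacker: it maintains its own ridge estimates (theta_hat_a, beta_a)
# from the observed (context, arm, reward) stream and, for any arm other than the target,
# returns an additive reward perturbation that (up to confidence widths and clipping to
# [0, 1]) makes the pulled arm's reward look like (1 - eps) times the target arm's.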
class RewardAttacker(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.99,
bound_features=None, noise_variance=None, bound_context=None, eps=1/2, **kwargs):
self.K = nb_arms
self.dim = dimension
self.reg_factor = reg_factor
self.delta = delta
self.iteration = None
self.bound_context = bound_context
self.bound_features = bound_features
self.noise_variance = noise_variance
self.eps = eps
self.reset()
def reset(self):
d = self.dim
self.thetas_hat = np.zeros((self.K, d))
self.betas = np.zeros((self.K,))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas_hat.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
self.n_samples = np.zeros((self.K,))
self.iteration = 0
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
def alpha(self):
d = self.dim
sigma, B, D = self.noise_variance, self.bound_context, self.bound_features
return sigma * np.sqrt(d * np.log((1 + np.maximum(1, self.n_samples) * B * B / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * D
def update(self, context, a_t, r_t):
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
self.thetas_hat[a_t] = np.dot(self.inv_design_matrices[a_t], self.bs[a_t])
self.betas = self.alpha()
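    # Attack value for one round: subtract the clipped optimistic estimate of the pulled
    # arm and add back (1 - eps) times the clipped pessimistic estimate of the target,
    # so the corrupted reward of any non-target arm appears at most (1 - eps) times as
    # good as the target arm's.  No attack is applied when the target itself is pulled.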
def compute_attack(self, action, context, a_star):
if action != a_star:
temp_1 = self.betas[action] * np.sqrt(np.dot(context, np.dot(self.inv_design_matrices[action], context)))
temp_2 = self.betas[a_star] * np.sqrt(np.dot(context, np.dot(self.inv_design_matrices[a_star], context)))
att = - min(1, max(np.dot(self.thetas_hat[action], context) + temp_1, 0)) + (1 - self.eps) \
* (min(1, max(0, np.dot(self.thetas_hat[a_star], context) - temp_2)))
return att
else:
return 0
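# EXP4 (exponential weights for exploration and exploitation with experts): keeps a
# weight vector Q over the experts, mixes their advice into an arm distribution
# P = E^T Q, and updates Q with exponentiated importance-weighted losses, where the
# estimator uses the implicit-exploration offset gamma in the denominator (EXP4-IX style).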
class Exp4(object):
def __init__(self, nb_arms, dimension, experts, eta=0.5, gamma=1):
self.K = nb_arms
self.dim = dimension
self.eta = eta
self.gamma = gamma
self.experts = experts
self.nb_experts = len(experts)
self.reset()
@property
def n_actions(self):
return self.K
@property
def n_features(self):
return self.dim
def reset(self):
self.Q = np.ones((self.nb_experts,))/self.nb_experts
self.iteration = 0
def get_expert_advice(self, context):
proba_matrix = np.zeros((self.nb_experts, self.K))
for m in range(self.nb_experts):
proba_matrix[m] = self.experts[m].get_action(context)
return proba_matrix
def get_action(self, context):
self.iteration += 1
self.E = self.get_expert_advice(context)
self.P = np.dot(self.E.T, self.Q)
#self.P = self.P/np.sum(self.P)
temp = np.linspace(0, self.K-1, self.K, dtype='int')
action = np.random.choice(temp, p=self.P)
return action
def update(self, context, a_t, r_t):
X = np.ones((self.K,))
X[a_t] = X[a_t] - (1 - r_t)/(self.P[a_t] + self.gamma)
X_experts = np.dot(self.E, X)
self.Q = self.Q*np.exp(self.eta*X_experts)
self.Q = self.Q/np.sum(self.Q)
| ContextualBanditsAttacks-main | isoexp/contextual/contextual_linucb.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pickle
import matplotlib.pyplot as plt
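# Linear contextual bandit environment: the expected reward of arm a for context x is
# <theta_a, x>, observed with additive Gaussian noise of standard deviation `noise`.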
class ContextualLinearMABModel(object):
def __init__(self, random_state=0, noise=0.1, thetas=None):
self.local_random = np.random.RandomState(random_state)
self.noise = noise
self.thetas = thetas
def reward(self, context, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = np.dot(context, self.thetas[action]) + self.noise * self.local_random.randn(1)
return reward
def best_arm_reward(self, context):
D = np.dot(self.thetas, context)
return np.max(D)
@property
def n_features(self):
return self.thetas.shape[1]
@property
def n_actions(self):
return self.thetas.shape[0]
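# Randomly generated linear arms with a finite pool of n_user contexts.  A constant
# bias coordinate is appended to the (n_features - 1)-dimensional draws and the
# parameters are shifted and rescaled so that every expected reward <theta_a, x> over
# the context pool lands in (0, 1].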
class RandomContextualLinearArms(ContextualLinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, bound_context=1):
self.bound_context = bound_context
thetas = np.abs(np.random.randn(n_actions, n_features-1))
super(RandomContextualLinearArms, self).__init__(random_state=random_state, noise=noise,
thetas=thetas)
self.context_lists = []
self.n_user = 5
self.n = self.n_user
thetas = np.ones((n_actions, n_features))
thetas[:, :-1] = self.thetas.copy()
max_rwd = -float('inf')
min_rwd = float('inf')
for k in range(self.n_user):
test = np.abs(np.random.randn(self.n_features))
test = np.random.uniform(low=0, high=bound_context)*test/np.linalg.norm(test)
dot_prod = np.dot(self.thetas, test)
maxi = np.max(dot_prod)
mini = np.min(dot_prod)
if maxi >= max_rwd:
max_rwd = maxi
if mini <= min_rwd:
min_rwd = mini
self.context_lists.append(np.concatenate((test, np.array([1]))))
self.thetas = thetas
thetas[:, -1] = -min_rwd + 1
thetas = thetas / (max_rwd - min_rwd + 1)
self.thetas = thetas
print('Different Means:')
for k in range(self.n_user):
print('Means for context {}'.format(k), np.dot(thetas, self.context_lists[k]))
self.theta = self.thetas
def get_context(self):
return self.context_lists[np.random.randint(low=0, high=self.n_user)]
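# Bandit model built from data: arm parameters and user contexts are loaded from CSV
# files (the commented-out __main__ block below points at Jester latent factors), a bias
# coordinate is appended, and the parameters are rescaled so that all expected rewards
# lie in (0, 1].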
class DatasetModel(ContextualLinearMABModel):
def __init__(self, arm_csvfile, user_csvfile, random_state=0, noise=0., arms_limit=None, context_limit=None):
temp_thetas = np.loadtxt(arm_csvfile, delimiter=',').T
temp_user_contexts = np.loadtxt(user_csvfile, delimiter=',')
K, d = temp_thetas.shape
N, _ = temp_user_contexts.shape
thetas = np.ones((K, d+1))
user_contexts = np.ones((N, d+1))
thetas[:, :-1] = temp_thetas.copy()
if arms_limit is not None:
thetas = thetas[:arms_limit]
user_contexts[:, :-1] = temp_user_contexts.copy()
if context_limit is not None:
user_contexts = user_contexts[:context_limit]
self.bound_context = np.linalg.norm(user_contexts, axis=1).max()
D = np.dot(temp_user_contexts, temp_thetas.T)
min_rwd = np.min(D)
max_rwd = np.max(D)
thetas[:, -1] = -min_rwd + 1
thetas = thetas / (max_rwd - min_rwd + 1)
self.context_lists = user_contexts.copy()
self.n_user, _ = user_contexts.shape
super(DatasetModel, self).__init__(random_state=random_state, noise=noise,
thetas=thetas)
self.theta = self.thetas
def get_context(self):
return self.context_lists[np.random.randint(low=0, high=self.n_user)]
class AttackOneUserModel(ContextualLinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, bound_context=1, distance=1):
self.bound_context = bound_context
thetas = np.abs(np.random.randn(n_actions, n_features))
norm_thetas = np.linalg.norm(thetas, axis=1)
thetas = (1/2) * thetas/norm_thetas.reshape((n_actions, 1))
super(AttackOneUserModel, self).__init__(random_state=random_state, noise=noise,
thetas=thetas)
self.context_lists = []
self.n_user = 1
self.n = self.n_user
self.distance = distance
for k in range(self.n_user):
test = np.abs(np.random.randn(self.n_features))
test = np.random.uniform(low=0, high=bound_context)*test/np.linalg.norm(test)
self.context_lists.append(test)
print('Different Means:')
# for k in range(self.n_user):
# print('Means for context {}'.format(k), np.dot(thetas, self.context_lists[k]))
self.theta = self.thetas
def get_context(self):
return self.context_lists[np.random.randint(low=0, high=self.n_user)]
def add_target_arm(self):
theta_target_arm = np.abs(np.random.randn(self.n_features))
theta_target_arm = self.distance * theta_target_arm/np.linalg.norm(theta_target_arm)
import cvxpy as cp
n_points = len(self.thetas)
lambdas = cp.Variable(n_points)
A = np.ones((1,n_points))
pos = -np.eye(n_points)
constraints = [A@lambdas == 1, pos@lambdas <= 0]
obj = cp.Minimize(cp.quad_form(theta_target_arm - self.thetas.T @ lambdas, np.eye(self.n_features)))
prob = cp.Problem(obj, constraints)
prob.solve()
print('Distance to convex hull', np.sqrt(prob.value))
self.thetas = np.concatenate((self.thetas, theta_target_arm.reshape((1, self.n_features))), axis=0)
if __name__ == '__main__':
import os
# arm_file = os.path.join(os.getcwd(),'../../examples/jester/Vt_jester.csv')
# user_file = os.path.join(os.getcwd(),'../../examples/jester/U.csv')
# test_model = DatasetModel(arm_csvfile=arm_file, user_csvfile=user_file, context_limit=100)
r = np.linspace(0, 1/2)
for rr in r:
test_model = AttackOneUserModel(n_features=2, n_actions=10, distance=rr)
# print(test_model.context_lists)
# print(np.linalg.norm(test_model.thetas,axis=1))
test_model.add_target_arm()
# print(test_model.thetas)
# for x in test_model.context_lists:
# print(np.dot(test_model.thetas, x))
if test_model.n_features == 2:
for a in range(test_model.n_actions-1):
plt.scatter(test_model.thetas[a, 0], test_model.thetas[a, 1], marker='+')
plt.scatter(test_model.thetas[test_model.n_actions - 1, 0], test_model.thetas[test_model.n_actions - 1, 1],
marker='^')
# for x in test_model.context_lists:
# plt.scatter(x[0], x[1], marker='o')
plt.show()
# RandomContextualLinearArms() | ContextualBanditsAttacks-main | isoexp/contextual/contextual_models.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import cvxpy as cp
from scipy.optimize import minimize
class RandomArm(object):
def __init__(self, initial_arms):
self.arms = initial_arms
def get_action(self):
return np.random.choice(self.arms)
def update(self, a_t, r_t):
pass
def reset(self):
pass
class LinearBandit(object):
def __init__(self, arm_features, reg_factor=1., delta=0.5,
bound_theta=None, noise_variance=None):
self.arm_features = arm_features
self.reg_factor = reg_factor
self.delta = delta
self.iteration = None
self.bound_theta = bound_theta
self.bound_features = np.max(np.sqrt(np.sum(np.abs(arm_features) ** 2, axis=1)))
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.n_features
self.A = self.reg_factor * np.eye(d, d)
self.b = np.zeros((d,))
self.range = 1
self.est_bound_theta = 0
self.est_bound_features = 0
self.n_samples = 0
self.iteration = 0
@property
def n_actions(self):
return self.arm_features.shape[0]
@property
def n_features(self):
return self.arm_features.shape[1]
def auto_alpha(self):
d = self.n_features
return self.range * np.sqrt(d * np.log((1 + max(1, self.n_samples) / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * np.linalg.norm(self.theta_hat, 2)
def alpha(self, n_samples):
d = self.n_features
if self.bound_theta is None or self.noise_variance is None:
# use estimated quantities
sigma, B, D = self.range, self.est_bound_theta, self.bound_features
else:
sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
return sigma * np.sqrt(d * np.log((1 + max(1, n_samples) * D * D / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * B
def get_action(self):
self.iteration += 1
# Let's not be biased with tiebreaks, but add in some random noise
noise = np.random.random(self.n_actions) * 0.000001
A_inv = np.linalg.inv(self.A)
self.theta_hat = A_inv.dot(self.b)
ta = np.diag(self.arm_features.dot(A_inv).dot(self.arm_features.T))
sfactor = self.alpha(self.n_samples)
        ucb = self.arm_features.dot(self.theta_hat) + sfactor * np.sqrt(ta)  # bonus scales the ellipsoidal norm sqrt(phi^T A^{-1} phi)
ucb = ucb + noise
choice = np.argmax(ucb) # choose the highest
# print(ucb, choice)
return choice
def update(self, a_t, r_t):
# update the input vector
phi = self.arm_features[a_t]
self.A += np.outer(phi, phi)
self.b += r_t * phi
self.range = max(self.range, abs(r_t))
self.est_bound_theta = np.linalg.norm(self.theta_hat)
self.n_samples += 1
class EfficientLinearBandit(object):
def __init__(self, arm_features, reg_factor=1., delta=0.5,
bound_theta=None, noise_variance=None):
self.arm_features = arm_features
self.reg_factor = reg_factor
self.delta = delta
self.iteration = None
self.bound_theta = bound_theta
self.bound_features = np.max(np.sqrt(np.sum(np.abs(arm_features) ** 2, axis=1)))
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.n_features
self.Ainv = np.eye(d, d) / self.reg_factor
self.b = np.zeros((d,))
self.range = 1
self.est_bound_theta = 0
self.est_bound_features = 0
self.n_samples = 0
self.iteration = 0
@property
def n_actions(self):
return self.arm_features.shape[0]
@property
def n_features(self):
return self.arm_features.shape[1]
def auto_alpha(self):
d = self.n_features
sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
return sigma * np.sqrt(d * np.log((1 + max(1, self.iteration - 1) * D * D / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * B
def alpha(self, n_samples):
d = self.n_features
if self.bound_theta is None or self.noise_variance is None:
# use estimated quantities
sigma, B, D = self.range, self.est_bound_theta, self.bound_features
else:
sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
return sigma * np.sqrt(d * np.log((1 + max(1, n_samples) * D * D / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * B
def get_action(self, n_sam=None):
self.iteration += 1
if n_sam is None:
n_sam = self.n_samples
# Let's not be biased with tiebreaks, but add in some random noise
noise = np.random.random(self.n_actions) * 0.000001
# A_inv = np.linalg.inv(self.A)
# assert np.allclose(A_inv, self.Ainv)
self.theta_hat = np.dot(self.Ainv, self.b)
ta = np.diag(np.dot(self.arm_features, np.dot(self.Ainv, self.arm_features.T)))
sfactor = self.alpha(n_sam)
ucb = self.arm_features.dot(self.theta_hat) + sfactor * np.sqrt(ta)
ucb = ucb + noise
choice = np.argmax(ucb) # choose the highest
# print(ucb, choice)
return choice
def update(self, a_t, r_t):
# update the input vector
phi = self.arm_features[a_t]
# self.A += np.outer(phi, phi)
self.Ainv = self.Ainv - np.dot(self.Ainv, np.dot(np.outer(phi, phi), self.Ainv)) / (
1. + np.dot(phi.T, np.dot(self.Ainv, phi)))
self.b += r_t * phi
self.range = max(self.range, abs(r_t))
# self.est_bound_theta = np.linalg.norm(self.theta_hat)
self.n_samples += 1
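# Sanity-check sketch (illustrative): the rank-one update in EfficientLinearBandit.update
# is the Sherman-Morrison formula, so the maintained Ainv should always equal the direct
# inverse of A = reg_factor * I + sum_t phi_t phi_t^T. All names below are local to this
# sketch; it relies only on the module-level numpy import.
def _check_sherman_morrison(d=4, n_updates=10, reg_factor=1.0, seed=0):
    rng = np.random.RandomState(seed)
    A = reg_factor * np.eye(d)
    Ainv = np.eye(d) / reg_factor
    for _ in range(n_updates):
        phi = rng.randn(d)
        A += np.outer(phi, phi)
        Ainv = Ainv - np.dot(Ainv, np.dot(np.outer(phi, phi), Ainv)) / (
                1. + np.dot(phi, np.dot(Ainv, phi)))
    return np.allclose(Ainv, np.linalg.inv(A))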
class UCB_GLM() :
def __init__(self, arm_features, reg_factor = 1, delta = 0.1,
bound_theta = 1,
link_function = lambda x : x,
noise_variance = None,
model = None,
conservative_level=0.1,
tighter_ucb = False,
kappa = None) :
self.conservative_level = conservative_level
self.tighter_ucb = tighter_ucb
self.arm_features = arm_features
self.reg_factor = reg_factor
self.delta = delta
self.bound_theta = bound_theta
self.model = model
self.n_actions, self.d = arm_features.shape
self.noise_variance = noise_variance
if self.model == 'gaussian' :
self.link_function = lambda x : x
self.kappa = 1
self.L = 1
elif self.model == 'bernoulli' :
self.link_function = lambda x : 1/(1+np.exp(-x))
if kappa is None :
self.kappa = 1/1000
else :
self.kappa = kappa
self.L = 1/4
self.reset()
def reset(self) :
self.rewards_history = []
self.features_history = []
self.A = self.reg_factor * np.eye(self.d, self.d)/self.kappa
self.Ainv = np.eye(self.d, self.d)*self.kappa / self.reg_factor
self.n_samples = 0
self.iteration = 0
self.theta_hat = np.zeros(self.d)
def solve_MLE(self, rewards_history, features_history) :
if self.iteration > 1:
if not self.model is None :
n_samples = len(self.rewards_history)
n_features = self.d
X = np.zeros((n_samples, n_features))
X = 1*np.array(self.features_history)
y = (np.array(self.rewards_history).reshape((n_samples,)))
beta = cp.Variable(n_features)
lambd = cp.Parameter(nonneg = True)
lambd.value = self.reg_factor/2
if self.model == 'bernoulli' :
log_likelihood = cp.sum(cp.multiply(y, X @ beta) -
cp.log_sum_exp(cp.vstack([np.zeros(n_samples), X @ beta]), axis=0)) - lambd * cp.norm(beta, 2)
problem = cp.Problem(cp.Maximize(log_likelihood))
problem.solve(verbose = False, warm_start = False, max_iters = 200)
return beta.value
else :
log_likelihood = cp.sum( cp.multiply(y, X @ beta) -
cp.power(X@beta, 2)/2) - lambd * cp.norm(beta, 2)
problem = cp.Problem(cp.Maximize(log_likelihood))
problem.solve(verbose = False, warm_start = False, max_iters = 200)
return beta.value
else :
return np.zeros((self.d,))
def auto_alpha(self, tight_bound):
if tight_bound :
return np.sqrt(2*self.L*np.log(self.n_samples + 1)/self.kappa)
else :
sigma, B = self.noise_variance, self.bound_theta
return np.sqrt(self.reg_factor/self.kappa)*B + sigma*np.sqrt( self.d*np.log(1 + self.iteration*self.kappa/(self.reg_factor*self.d)) + 2*np.log(1/self.delta))/self.kappa
def get_action(self) :
self.iteration += 1
noise = np.random.random(self.n_actions) * 0.0000001
self.theta_hat = self.solve_MLE(self.rewards_history, self.features_history)
beta = self.auto_alpha(self.tighter_ucb)
ta = np.diag(np.dot(self.arm_features, np.dot(self.Ainv, self.arm_features.T)))
        ucb = self.arm_features.dot(self.theta_hat) + beta * np.sqrt(ta)  # exploration bonus on the ellipsoidal norm sqrt(phi^T A^{-1} phi)
ucb = ucb + noise
UCB_action= np.argmax(ucb)
return UCB_action
def update(self, a_t, r_t):
phi = self.arm_features[a_t]
self.Ainv = self.Ainv - np.dot(self.Ainv, np.dot(np.outer(phi, phi), self.Ainv)) / (1. + np.dot(phi.T, np.dot(self.Ainv, phi)))
self.A += np.outer(phi,phi)
self.rewards_history.append(r_t)
self.features_history.append(phi)
self.n_samples += 1
def check_condition(self, theta):
temp = np.array(self.rewards_history).reshape((len(self.rewards_history),)) - self.link_function(np.array(self.features_history).dot(self.theta_hat))
        temp = temp * np.array(self.features_history).T  # shape (d, n): each feature vector scaled by its residual
temp = temp.T
temp = np.sum(temp, axis = 0) - self.reg_factor*theta
return temp
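# Illustrative sketch: solve_MLE above fits a ridge-penalized GLM with cvxpy. The helper
# below reproduces the Bernoulli branch on synthetic data so the objective is easier to
# read in isolation; X, y, reg and the true parameter (all ones) are assumptions local to
# this sketch.
def _demo_logistic_mle(n=50, d=3, reg=1.0, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n, d)
    y = rng.binomial(1, 1.0 / (1.0 + np.exp(-np.dot(X, np.ones(d)))))
    beta = cp.Variable(d)
    log_likelihood = cp.sum(cp.multiply(y, X @ beta)
                            - cp.log_sum_exp(cp.vstack([np.zeros(n), X @ beta]), axis=0))
    problem = cp.Problem(cp.Maximize(log_likelihood - (reg / 2) * cp.norm(beta, 2)))
    problem.solve()
    return beta.value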
class LinPHE():
def __init__(self, arm_features, reg_factor=1, alpha=2):
self.arm_features = arm_features
self.reg_factor = reg_factor
self.alpha = alpha
self.reset()
def reset(self):
self.K, self.d = self.arm_features.shape
self.design_matrix = self.reg_factor * np.eye(self.d)
self.inv_design_matrix = np.eye(self.d) / (self.reg_factor)
self.iteration = 0
self.N = np.zeros((self.K,))
self.S = np.zeros((self.K,))
def get_action(self):
if self.iteration < -1: #self.d:
choice = np.random.randint(self.K)
else:
temp = np.zeros((self.d,))
for j in range(self.K):
Z = np.random.binomial(1 + int(self.alpha * self.N[j]), 0.5)
temp = temp + self.arm_features[j] * (self.S[j] + Z)
self.theta_hat = np.dot(self.inv_design_matrix, temp) / (self.alpha + 1)
ucb = self.arm_features.dot(self.theta_hat)
noise = np.random.randn(self.K) * 10 ** -7
ucb = ucb + noise
choice = np.argmax(ucb)
self.iteration += 1
return choice
def update(self, a_t, r_t):
self.S[a_t] += r_t * 1
self.N[a_t] += 1
x = self.arm_features[a_t]
self.design_matrix = self.design_matrix + np.outer(x, x)
self.inv_design_matrix = (self.inv_design_matrix - np.dot(self.inv_design_matrix,
np.dot(np.outer(x, x), self.inv_design_matrix)) / (
1. + np.dot(x.T, np.dot(self.inv_design_matrix, x))))
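# Usage sketch (illustrative): a short interaction loop for LinPHE on synthetic Bernoulli
# rewards with a linear mean. The feature matrix, parameter vector and horizon are
# assumptions local to this sketch; it relies only on the module-level numpy import.
def _demo_linphe(T=200, seed=0):
    rng = np.random.RandomState(seed)
    features = rng.rand(5, 3)
    theta = rng.rand(3)
    means = np.dot(features, theta)
    means = means / means.max()             # rescale the means into [0, 1]
    alg = LinPHE(arm_features=features)
    for _ in range(T):
        a = alg.get_action()
        r = rng.binomial(1, means[a])
        alg.update(a, r)
    return alg.theta_hat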
if __name__ == '__main__':
import sys
sys.path[0] = '/Users/evrard/Desktop/monotone_mabs/'
# from isoexp.linear.linearbandit import EfficientLinearBandit, LinearBandit
# from isoexp.conservative.linearmabs import EfficientConservativeLinearBandit, NewCLUB, SafetySetCLUCB, BatchedCLUCB, LinearOracle, LinUCBOracle
from isoexp.linear.linearmab_models import RandomLinearArms, RandomLogArms
from tqdm import trange
from collections import namedtuple
from joblib import Parallel, delayed
seed = np.random.randint(0, 10 ** 5)
MABResults = namedtuple('MABResults', 'regret,norm_error')
noise = 0.1
model = RandomLogArms(n_actions=20, n_features=2, noise=noise,
bound_features=1,
random_state=seed)
model.features = model.features
theta_bound = np.linalg.norm(model.theta, 2)
link = lambda x: 1 / (1 + np.exp(-x))
link_means = np.array([link(np.dot(model.theta, el)) for el in model.features])
means = np.array([(np.dot(model.theta, el)) for el in model.features])
T = 1500
PARALLEL = True
nb_simu = 10
algorithms = {
# 'EfficientLinearBandit': EfficientLinearBandit(arm_features=model.features,
# reg_factor=1.,
# delta=0.1,
# noise_variance=noise,
# bound_theta=theta_bound),
'UCB-GLM-tight-bound': UCB_GLM(arm_features=model.features,
bound_theta=theta_bound,
model='bernoulli',
noise_variance=noise,
reg_factor=1,
delta=0.1,
tighter_ucb=True),
'UCB-GLM': UCB_GLM(arm_features=model.features,
bound_theta=theta_bound,
model='bernoulli',
noise_variance=noise,
reg_factor=1,
delta=0.1,
tighter_ucb=False)}
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
def work(alg_name, alg):
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in trange(T, desc='Current episode :', leave=True):
a_t = alg.get_action()
# print(a_t)
r_t = model.reward(a_t)
alg.update(a_t, r_t)
                    regret[k, t] = model.best_arm_reward() - link(np.dot(model.theta, model.features[a_t]))  # best_arm_reward already applies the link
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
return alg_name, MABResults(regret=regret, norm_error=norms)
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(alg_name, algorithms[alg_name]) for alg_name in algorithms.keys())
else:
results = []
for alg_name, alg in algorithms.items():
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
cond = np.zeros((nb_simu, T))
draws = np.zeros((nb_simu, T))
for k in trange(nb_simu, desc='Simulating {}'.format(alg_name)):
alg.reset()
for t in trange(T, desc='Current episode ', leave=True):
a_t = alg.get_action()
r_t = model.reward(a_t)
alg.update(a_t, r_t)
                    regret[k, t] = model.best_arm_reward() - link(np.dot(model.theta, model.features[a_t]))  # best_arm_reward already applies the link
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
draws[k, t] = a_t
results += [(alg_name, MABResults(regret=regret, norm_error=norms))]
import pylab as plt
for (alg_name, val) in results:
        mean_regret = np.mean(val.regret, axis=0)  # per-round regret averaged over runs; cumulated at plot time
mean_norms = np.mean(val.norm_error, axis=0)
t = np.linspace(1, T + 1, T, dtype='int')
low_quantile = np.quantile(val.regret/t, 0.1, axis=0)
high_quantile = np.quantile(val.regret/t, 0.9, axis=0)
plt.figure(0)
plt.semilogx(mean_regret.cumsum()/t, label=alg_name)
plt.legend()
# plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.figure(1)
plt.plot(mean_norms, label=alg_name)
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | isoexp/linear/linearbandit.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
class LinearMABModel(object):
def __init__(self, random_state=0, noise=0.1, features=None, theta=None):
self.local_random = np.random.RandomState(random_state)
self.noise = noise
self.features = features
self.theta = theta
def reward(self, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = np.dot(self.features[action], self.theta) + self.noise * self.local_random.randn(1)
# mean = np.dot(self.features[action], self.theta)
# reward = np.random.binomial(1, mean)
return reward
def best_arm_reward(self):
D = np.dot(self.features, self.theta)
return np.max(D)
@property
def n_features(self):
return self.features.shape[1]
@property
def n_actions(self):
return self.features.shape[0]
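# Usage sketch (illustrative): LinearMABModel only wraps a fixed feature matrix and a
# parameter vector; the expected reward of arm a is <features[a], theta>. The concrete
# features, theta and noise level below are assumptions local to this sketch.
def _demo_linear_mab_model():
    features = np.eye(3)
    theta = np.array([0.1, 0.5, 0.9])
    model = LinearMABModel(noise=0.05, features=features, theta=theta)
    print('best expected reward:', model.best_arm_reward())
    print('noisy reward of arm 1:', model.reward(1))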
class LinPHEModel(object):
def __init__(self, d=10, n_actions=100, random_state=0):
self.local_random = np.random.RandomState(random_state)
self.n_features = d
self.n_actions = n_actions
temp_theta = self.local_random.randn(d - 1)
temp_theta = np.random.uniform(0, 1 / 2) * temp_theta / np.linalg.norm(temp_theta)
self.theta = np.ones(d) / 2
self.theta[:-1] = temp_theta
self.features = np.ones((n_actions, d))
# temp_features = self.local_random.randn(n_actions, d-1)
# temp_features = np.random.uniform(0, 1)*temp_features/np.linalg.norm(temp_features, axis = 1).reshape((self.n_actions, 1))
# print(temp_features)
# self.features[:, :-1] = temp_features
radius = 1
Y = self.local_random.randn(n_actions, d - 1)
U = np.random.uniform(0, 1, size=n_actions)
r = radius * np.power(U, 1. / (d - 1))
F = r / np.linalg.norm(Y, axis=1)
X = Y * F[:, np.newaxis]
self.features[:, :-1] = X
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# if d-1 == 3:
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(X[:,0], X[:,1], X[:,2], label='new')
# ax.scatter(temp_features[:,0], temp_features[:,1], temp_features[:,2], label='old')
# plt.legend()
# plt.show()
# if d-1 == 2:
# plt.figure()
# plt.scatter(X[:,0], X[:,1], label='new')
# plt.scatter(temp_features[:,0], temp_features[:,1], label='old')
# plt.legend()
# plt.show()
#
# print(X)
def reward(self, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = self.local_random.binomial(1, np.dot(self.theta, self.features[action]))
return reward
def best_arm_reward(self):
D = np.dot(self.features, self.theta)
return np.max(D)
def means(self):
D = np.dot(self.features, self.theta)
return D
class RandomLogArms(object) :
def __init__(self, random_state = 0, noise = .1,
n_actions = 4, n_features = 100,
bound_features = 1, bound_theta = 1) :
features = np.random.randn(n_actions, n_features)
self.features = bound_features*features/max(np.linalg.norm(features, axis = 1))
theta = np.random.randn(n_features)
self.theta = np.random.uniform(low = 0, high = bound_theta)*theta/np.linalg.norm(theta)
self.link = lambda x : 1/(1 + np.exp(-x))
self.noise = noise
self.local_random = np.random.RandomState(random_state)
self.n_actions, self.n_features = n_actions, n_features
temp = np.dot(self.features,self.theta) + bound_features
self.kappa = min(self.link(temp)*(1 - self.link(temp)))
def reward(self, action) :
reward = self.link(np.dot(self.features[action], self.theta)) + self.noise * self.local_random.randn(1)
return reward
def best_arm_reward(self):
D = np.dot(self.features, self.theta)
return self.link(np.max(D))
class RandomNormalLinearArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, reward_lim=(-np.inf, np.inf)):
features = np.random.randn(n_actions, n_features)
real_theta = np.random.randn(n_features) * 0.5
means = np.dot(features, real_theta)
idxs = (means < reward_lim[0]) | (means > reward_lim[1])
idxs = np.arange(n_actions)[idxs]
for i in idxs:
mean = -np.inf
feat = None
while mean > reward_lim[1] or mean < reward_lim[0]:
feat = np.random.randn(1, n_features)
mean = np.dot(feat, real_theta)
features[i, :] = feat
super(RandomNormalLinearArms, self).__init__(random_state=random_state, noise=noise,
features=features, theta=real_theta)
class RandomLinearArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=4, bound_features=1, bound_theta = 1, positive=True, max_one=True):
features = np.random.randn(n_actions, n_features)
real_theta = np.random.randn(n_features)
real_theta = np.random.uniform(low = 1/2, high = bound_theta)*real_theta/np.linalg.norm(real_theta)
if positive:
idxs = np.dot(features, real_theta) <= 0
idxs = np.arange(n_actions)[idxs]
for i in idxs:
mean = -1
feat = None
while mean <= 0:
feat = np.random.randn(1, n_features)
mean = np.dot(feat, real_theta)
features[i, :] = feat
features = np.random.uniform(low = 1/2, high = bound_features, size = (n_actions,1)) * features / max(np.linalg.norm(features, axis=1))
if max_one:
D = np.dot(features, real_theta)
min_rwd = min(D)
max_rwd = max(D)
min_features = features[np.argmin(D)]
features = (features - min_features) / (max_rwd - min_rwd)
super(RandomLinearArms, self).__init__(random_state=random_state, noise=noise,
features=features, theta=real_theta)
class DiffLinearArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=2, real_theta=np.array([9 / 10, 1 / 10]),
optimal_arm=np.array([1, 0]), baseline_arm=np.array([0, 1]), concentration_coeff=0.3):
baseline_arm = baseline_arm.reshape((baseline_arm.shape[0], 1))
features = baseline_arm + concentration_coeff * np.random.randn(n_features, n_actions)
idxs = np.dot(real_theta, features) <= 0
idxs = np.arange(n_actions)[idxs]
for i in idxs:
mean = -1
feat = None
while mean <= 0:
feat = baseline_arm + concentration_coeff * np.random.randn(n_features, 1)
mean = float(np.dot(real_theta, feat))
features[:, i] = feat.squeeze()
optimal_arm = optimal_arm.reshape((optimal_arm.shape[0], 1))
features = np.concatenate((features, optimal_arm), axis=1)
features = np.concatenate((features, baseline_arm), axis=1)
super(DiffLinearArms, self).__init__(random_state=random_state, noise=noise, features=features,
theta=real_theta)
class OtherArms(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=2):
angular_fraction = np.linspace(0, np.pi / 2, n_actions)
features = np.zeros((n_actions, n_features))
features[:, 0] = np.cos(angular_fraction)
features[:, 1] = np.sin(angular_fraction)
real_theta = np.array([1 - np.pi / (4 * n_actions), np.pi / (4 * n_actions)])
super(OtherArms, self).__init__(random_state=random_state, noise=noise, features=features, theta=real_theta)
class CircleBaseline(LinearMABModel):
def __init__(self, random_state=0, noise=0., n_actions=4, n_features=2, inner_radius=1 / 10, outer_radius=2):
temp = np.random.uniform(0, 2 * np.pi)
theta = outer_radius * np.array([np.cos(temp), np.sin(temp)])
angle_baseline = np.random.uniform(0, 2 * np.pi)
radius_baseline = np.random.uniform(1 / 10, inner_radius)
baseline = radius_baseline * np.array([np.cos(angle_baseline), np.sin(angle_baseline)]).reshape(1, n_features)
features = np.zeros((n_actions - 1, n_features))
radius_features = np.random.uniform(low=2 * inner_radius, high=outer_radius, size=(n_actions - 1, 1))
# radius_features = np.random.uniform(low = 0, high = inner_radius, size = (n_actions-1,1))
angle_features = np.random.uniform(0, 2 * np.pi, size=n_actions - 1)
features[:, 0] = np.cos(angle_features)
features[:, 1] = np.sin(angle_features)
features = radius_features * features
features = np.concatenate((features, baseline), axis=0)
# features = np.concatenate((baseline, features), axis = 0)
super(CircleBaseline, self).__init__(random_state=random_state, noise=noise, features=features, theta=theta)
| ContextualBanditsAttacks-main | isoexp/linear/linearmab_models.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from .linearmab_models import LinearMABModel
class ColdStartFromDatasetModel(LinearMABModel):
def __init__(self, arm_csvfile, user_csvfile, random_state=0, noise=0.):
features = np.loadtxt(arm_csvfile, delimiter=',').T
thetas = np.loadtxt(user_csvfile, delimiter=',')
super(ColdStartFromDatasetModel, self).__init__(random_state=random_state, noise=noise,
features=features, theta=None)
self.theta_idx = np.random.randint(low=0, high=thetas.shape[0])
print("Selecting user: {}".format(self.theta_idx))
self.theta = thetas[self.theta_idx]
# self.theta = np.random.randn(thetas.shape[1])
D = np.dot(self.features, self.theta)
min_rwd = min(D)
max_rwd = max(D)
min_features = features[np.argmin(D)]
self.features = (self.features - min_features) / (max_rwd - min_rwd)
| ContextualBanditsAttacks-main | isoexp/linear/coldstart.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import sys
import numpy.random as npr
from tqdm import tqdm
class contextEpsGREEDY():
"""
Args:
T (int): horizon
arms (list): list of available arms
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
def __init__(self, number_arms, dimension, epsilon=0.1, decrease_epsilon=False):
self.decrease_epsilon = decrease_epsilon
self.epsilon = epsilon
self.K = number_arms
self.dimension = dimension
self.rewards = []
self.draws = []
self.number_pulls = np.ones((self.K,)) # number of observations of each arm
self.sum_rewards = np.zeros((self.K,)) # sum of rewards for each arm
self.thetas = np.zeros((self.K, self.dimension))
self.As = [np.identity(self.dimension) for _ in range(number_arms)]
self.rewards_matrix = np.zeros((self.K, self.dimension))
self.nb_iter=0
self.inv_design_matrices = np.zeros((self.K, dimension, dimension))
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(dimension, dimension)
def estimated_best_arm(self, context):
return np.argmax(self.thetas.dot(context))
def get_action(self, context, deterministic=False):
if self.nb_iter < self.K:
return self.nb_iter
else:
# select the chosen_arm
expected_rewards = self.thetas.dot(context)
rnd = np.random.rand()
if not deterministic and rnd <= self.epsilon / (math.sqrt(self.nb_iter + 1) if self.decrease_epsilon else 1):
chosen_arm = np.random.choice(self.K)
else:
idxs = np.flatnonzero(np.isclose(expected_rewards, expected_rewards.max()))
                chosen_arm = int(np.random.choice(idxs))
return chosen_arm
def update(self, context, chosen_arm, reward):
# update quantities
self.nb_iter += 1
self.rewards.append(reward)
self.draws.append(chosen_arm)
self.sum_rewards[chosen_arm] += reward
self.number_pulls[chosen_arm] += 1
self.As[chosen_arm] += np.outer(context, context)
self.rewards_matrix[chosen_arm] += reward * context
self.thetas[chosen_arm] = np.linalg.inv(self.As[chosen_arm]).dot(self.rewards_matrix[chosen_arm])
return self.rewards, self.draws
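# Usage sketch (illustrative): interaction loop for contextEpsGREEDY against a synthetic
# linear reward model. The true parameters `thetas_true`, the horizon and the noise level
# are assumptions local to this sketch; it relies only on the module-level numpy import.
def _demo_context_eps_greedy(T=500, seed=0):
    rng = np.random.RandomState(seed)
    K, d = 4, 5
    thetas_true = rng.rand(K, d)
    agent = contextEpsGREEDY(number_arms=K, dimension=d, epsilon=0.1)
    for _ in range(T):
        context = rng.rand(d)
        a = agent.get_action(context)
        r = np.dot(thetas_true[a], context) + 0.1 * rng.randn()
        agent.update(context, a, r)
    return agent.estimated_best_arm(rng.rand(d))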
class ContextualLinearBandit(object):
def __init__(self, nb_arms, dimension, reg_factor=1., delta=0.5,
bound_features=None, noise_variance=None, bound_context=None, alpha=None):
self.K = nb_arms
self.dim = dimension
self.reg_factor = reg_factor
self.delta = delta
self.exploration_coeff = alpha
self.iteration = None
self.bound_context = bound_context
self.bound_features = bound_features
self.noise_variance = noise_variance
self.reset()
def reset(self):
d = self.dim
self.thetas = np.zeros((self.K, d))
self.inv_design_matrices = np.zeros((self.K, d, d))
self.bs = self.thetas.copy()
for arm in range(self.K):
self.inv_design_matrices[arm] = np.eye(d, d) / self.reg_factor
# self.range = 1
# self.est_bound_theta = 0
# self.est_bound_features = 0
self.n_samples = np.zeros((self.K,))
self.iteration = 0
@property
def n_actions(self):
return self.K
@property
def n_features(self):
        return self.dim  # the context dimension (returning self.n_features here would recurse forever)
# def auto_alpha(self):
# d = self.n_features
# sigma, B, D = self.noise_variance, self.bound_theta, self.bound_features
# return sigma * np.sqrt(d * np.log((1 + max(1, self.iteration - 1) * D * D / self.reg_factor) / self.delta)) \
# + np.sqrt(self.reg_factor) * B
def alpha(self):
d = self.dim
# print(d)
sigma, B, D = self.noise_variance, self.bound_context, self.bound_features
if self.exploration_coeff is None:
return sigma * np.sqrt(
d * np.log((1 + np.maximum(1, self.n_samples) * B * B / self.reg_factor) / self.delta)) \
+ np.sqrt(self.reg_factor) * D
        else:
            return self.exploration_coeff * np.ones((self.K,))  # per-arm vector, so get_action can index it
def get_action(self, context, deterministic=False):
self.iteration += 1
# Let's not be biased with tiebreaks, but add in some random noise
noise = np.random.random(self.K) * 0.000001
estimate = np.zeros((self.K,))
sfactor = self.alpha()
for arm in range(self.K):
Ainv = self.inv_design_matrices[arm]
# print(Ainv)
b = self.bs[arm]
theta_hat = np.dot(Ainv, b)
self.thetas[arm] = theta_hat
ta = np.dot(context, np.dot(Ainv, context))
sfactor = self.alpha()
# print('sfactor =', sfactor)
# print('context = ', context)
# print('theta_hat=', theta_hat)
# print('ta = ', ta)
estimate[arm] = np.dot(context, theta_hat) + sfactor[arm] * np.sqrt(ta)
ucb = estimate + (0 if deterministic else noise)
choice = np.argmax(ucb) # choose the highest
# print(ucb[choice])
return choice
def update(self, context, a_t, r_t):
self.inv_design_matrices[a_t] = self.inv_design_matrices[a_t] - \
np.dot(self.inv_design_matrices[a_t], np.dot(np.outer(context, context),
self.inv_design_matrices[a_t])) \
/ (1. + np.dot(context.T, np.dot(self.inv_design_matrices[a_t], context)))
self.bs[a_t] += r_t * context
self.n_samples[a_t] += 1
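# Usage sketch (illustrative): contextual LinUCB loop against a synthetic linear model.
# The bounds passed to the constructor feed the confidence width alpha(); all concrete
# values below are assumptions local to this sketch.
def _demo_contextual_linucb(T=500, seed=0):
    rng = np.random.RandomState(seed)
    K, d = 4, 5
    thetas_true = rng.rand(K, d)
    agent = ContextualLinearBandit(nb_arms=K, dimension=d, reg_factor=1., delta=0.1,
                                   bound_features=np.linalg.norm(thetas_true, axis=1).max(),
                                   bound_context=np.sqrt(d), noise_variance=0.1)
    for _ in range(T):
        context = rng.rand(d)
        a = agent.get_action(context)
        r = np.dot(thetas_true[a], context) + 0.1 * rng.randn()
        agent.update(context, a, r)
    return agent.thetas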
| ContextualBanditsAttacks-main | isoexp/mab/contextual_mab_algs.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import math
from scipy.stats import truncnorm
class ContextualLinearMABModel(object):
def __init__(self, random_state=0, noise=0.1, theta=None):
if isinstance(random_state, int):
self.local_random = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState), "random state is neither an int nor a random number generator"
self.local_random = random_state
self.noise = noise
self.theta = theta
def reward(self, context, action):
assert 0 <= action < self.n_actions, "{} not in 0 .. {}".format(action, self.n_actions)
reward = self.expected_reward(action, context) + self.noise * self.local_random.randn(1)
# mean = np.dot(self.features[action], self.theta)
# reward = np.random.binomial(1, mean)
return reward
def expected_reward(self, action, context):
return np.dot(context, self.theta[action])
def best_expected_reward(self, context):
D = np.dot(self.theta, context)
return np.max(D)
def best_arm(self, context):
D = np.dot(self.theta, context)
return np.argmax(D)
@property
def n_features(self):
return self.theta.shape[1]
@property
def n_actions(self):
return self.theta.shape[0]
def compute_regret(self, context, a_t):
D = np.dot(self.theta, context)
return np.max(D) - D[a_t]
class LinContextualArm(object):
def __init__(self, theta: np.array, random_state:int):
"""
Args:
mean: expectation of the arm
variance: variance of the arm
random_state (int): seed to make experiments reproducible
"""
self.theta = theta
self.local_random = np.random.RandomState(random_state)
def sample(self, random_state):
pass
class LinBernoulliArm(LinContextualArm):
def __init__(self, theta, random_state=0):
"""
Bernoulli arm
Args:
p (float): mean parameter
random_state (int): seed to make experiments reproducible
"""
super(LinBernoulliArm, self).__init__(theta=theta, random_state=random_state)
def sample(self, context: np.array):
proba = sigmoid(np.dot(self.theta, context))
return self.local_random.rand(1) < proba
def sigmoid(x):
return 1 / (1 + np.exp(-x))
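# Usage sketch (illustrative): a LinBernoulliArm returns 1 with probability
# sigmoid(<theta, context>); the theta and context values below are local assumptions.
def _demo_lin_bernoulli_arm():
    theta = np.array([1.0, -0.5])
    arm = LinBernoulliArm(theta)
    context = np.array([0.2, 0.4])
    print('success probability:', sigmoid(np.dot(theta, context)))
    print('one draw:', arm.sample(context))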
| ContextualBanditsAttacks-main | isoexp/mab/contextual_arms.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import sys
import numpy.random as npr
import cvxpy as cp
from tqdm import trange
from tqdm import tqdm
def UCB1(T, MAB, alpha=1.):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
alpha (float): shrink confidence interval
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
for k in range(K):
a = k
r = MAB[a].sample()
# update quantities
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
for t in range(K, T):
# select the arm
ucb = S / N + alpha * np.sqrt(np.log(t + 1) / N)
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
        a = int(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
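# Usage sketch (illustrative): UCB1 only needs arms exposing a sample() method, so a tiny
# Bernoulli stub is enough here (the real experiments use the arm classes in
# isoexp.mab.arms). The means (0.2, 0.5, 0.8) and the horizon are local assumptions.
def _demo_ucb1(T=1000, seed=0):
    rng = np.random.RandomState(seed)

    class _BernoulliStub(object):
        def __init__(self, p):
            self.mean = p

        def sample(self):
            return float(rng.rand() < self.mean)

    MAB = [_BernoulliStub(p) for p in (0.2, 0.5, 0.8)]
    rewards, draws = UCB1(T, MAB, alpha=1.)
    return rewards.sum(), np.bincount(draws.astype(int))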
def TS(T, MAB):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
alphas = np.ones((K,))
betas = np.ones((K,))
for t in range(T):
# sample the arm
thetas = np.random.beta(alphas, betas)
# select and apply action
a = np.argmax(thetas)
r = MAB[a].sample()
# update distribution
alphas[a] += r
betas[a] += 1 - r
rewards[t] = r
draws[t] = a
return rewards, draws
def epsGREEDY(T, MAB, epsilon=0.1):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
for k in range(K):
a = k
r = MAB[a].sample()
# update quantities
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
for t in range(K, T):
# select the arm
ucb = S / N
rnd = np.random.rand()
if rnd <= epsilon:
a = np.random.choice(K)
else:
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
            a = int(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
def SoftMAB(T, MAB, temp=1.0):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
for k in range(K):
a = k
r = MAB[a].sample()
# update quantities
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
for t in range(K, T):
# select the arm
ucb = S / N
proba = np.exp(ucb / temp)
proba = proba / np.sum(proba)
a = np.random.choice(K, p=proba)
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
def ExploreThenExploit(T, MAB, T1):
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
    T1 = int(np.ceil(T1))
for t in range(T1):
a = np.random.choice(K)
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
for t in range(T1, T):
# select the arm
ucb = S / N
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
        a = int(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
S[a] += r
N[a] += 1
return rewards, draws
def UCBV(T, MAB, alpha=1.):
"""
Args:
T (int): horizon
MAB (list): list of available MAB models
alpha (float): shrink confidence interval
Returns:
rewards (array-like): observed rewards
draws (array-like): indexes of selected arms
"""
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
M = np.zeros((K,)) # second moment (for Welford's algorithm)
vars = np.ones((K,)) * np.inf
for t in range(T):
# select the arm
ln = np.log(t + 1)
ucb = S / N + alpha * (np.sqrt(vars * ln / N) + ln / N)
ucb[N < 2] = sys.maxsize
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
        a = int(np.random.choice(idxs))
r = MAB[a].sample()
# update quantities
rewards[t] = r
draws[t] = a
old_mean = S[a] / N[a] if N[a] > 0 else 0
S[a] += r
N[a] += 1
M[a] = M[a] + (r - old_mean) * (r - S[a]/N[a])
vars[a] = M[a] / N[a] # update variance estimate
return rewards, draws
def BootstrapedUCB(T, MAB, delta = 0.1, b_rep = 200):
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.zeros((K,))
S = np.zeros((K,))
rewards_arm = {}
for k in range(K):
a = k
r = 1*MAB[a].sample().squeeze()
rewards[k] = r
draws[k] = a
rewards_arm[k] = [r]
S[a] += r
N[a] += 1
for t in range(K, T):
alpha = 1/(t+1)
bootstrap_quantile = quantile((1-delta)*alpha, S, N, rewards_arm, B = b_rep)
phi = np.sqrt(2*np.log(1/alpha)/N)
## Theoretical ucb
#ucb = S / N + (bootstrap_quantile + np.sqrt(np.log(2/(delta*alpha))/N)*phi)
## Ucb used in practice
ucb = S / N + (bootstrap_quantile + np.sqrt(1/N)*phi)
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
        a = int(np.random.choice(idxs))
r = 1*MAB[a].sample().squeeze()
rewards[t] = r
draws[t] = a
rewards_arm[a].append(r)
S[a] += r
N[a] += 1
return rewards, draws
def quantile(alpha, S, N, rwds, B = 100, distrib = 'rademacher') :
means = np.nan_to_num(S/N)
K = len(N)
np_quantile = np.zeros(K)
for k in range(K) :
n = N[k]
if n > 0 :
bootstrap_avg = np.zeros(B)
if distrib == 'rademacher' :
weights = 2*npr.binomial(1, 1/2, size = (int(B),int(n))) - 1
elif distrib =='gaussian' :
weights = npr.randn(int(B),int(n))
history = np.array(rwds[k]) - means[k]
bootstrap_avg = (np.dot(weights, history)/n)
np_quantile[k] = np.percentile(bootstrap_avg, 100*(1 - alpha), interpolation = 'nearest')
else :
np_quantile[k] = +np.inf
return np_quantile
def PHE(T, MAB, alpha = 2) :
K = len(MAB)
rewards = np.zeros((T,))
draws = 0*rewards
N = np.zeros((K,))
S = np.zeros((K,))
biased_test = np.zeros((K,))
for k in range(K):
a = k
r = 1*MAB[a].sample().squeeze()
rewards[k] = r
draws[k] = a
S[a] +=r
N[a] +=1
for t in range(K, T) :
for i in range(K) :
Z = np.random.binomial(1,1/2, size = int(alpha*N[i]))
biased_test[i] = (np.sum(Z) + S[i])/((alpha+1)*N[i])
idxs = np.flatnonzero(np.isclose(biased_test, biased_test.max()))
        a = int(np.random.choice(idxs))
r = 1*MAB[a].sample().squeeze()
N[a] +=1
S[a] +=r
rewards[t] = r
draws[t] = a
return rewards, draws
def Random_exploration(T, MAB, alpha = 2) :
K = len(MAB)
rewards = np.zeros((T,))
draws = 0*rewards
N = np.zeros((K,))
S = np.zeros((K,))
biased_test = np.zeros((K,))
for k in range(K):
a = k
r = 1*MAB[a].sample().squeeze()
rewards[k] = r
draws[k] = a
S[a] +=r
N[a] +=1
for t in range(K, T) :
for i in range(K) :
Z = np.random.binomial(1,1/2, size = int(alpha*N[i]))
biased_test[i] = np.nan_to_num(np.mean(Z))+ S[i]/N[i]
idxs = np.flatnonzero(np.isclose(biased_test, biased_test.max()))
        a = int(np.random.choice(idxs))
r = 1*MAB[a].sample().squeeze()
N[a] +=1
S[a] +=r
rewards[t] = r
draws[t] = a
return rewards, draws
def EXP3_IX(T, MAB, eta = 0.1, gamma = 0):
K = len(MAB)
losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
sum_exp = K
exp_losses = np.ones((K,))
arms = np.linspace(0, K-1, K, dtype='int')
for t in range(T):
# print('cum_losses =', exp_losses)
# print('sum losses=', sum_exp)
P = exp_losses/sum_exp
# print('P =', P)
action = np.random.choice(arms, p=P)
X = 1*MAB[action].sample().squeeze()
losses[action] = losses[action] + (1 - X)/(gamma + P[action])
exp_losses[action] = exp_losses[action]*np.exp(-eta* (1 - X)/(gamma + P[action]))
sum_exp = np.sum(exp_losses)
rewards[t] = X
draws[t] = action
return rewards, draws
def attacked_EXP3_IX(T, MAB, target_arm, eta = None, gamma = None, delta=0.99):
K = len(MAB)
losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
sum_exp = K
exp_losses = np.ones((K,))
arms = np.linspace(0, K-1, K, dtype='int')
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,))
beta = np.zeros((K,))
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
if eta is None or gamma is None:
eta = np.sqrt(2*np.log(K + 1)/(K*T))
gamma = np.sqrt(2*np.log(K + 1)/(K*T))/2
for t in range(T):
P = exp_losses/sum_exp
if t < K:
action = t
attack_t = 0
else:
            action = np.random.choice(arms, p=P)
            if action != target_arm:
                time_of_attacks[t] = 1
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2*N))
attack_t = - np.maximum((S / N)[action] - (S / N)[target_arm] + beta[action] + beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
true_X = 1*MAB[action].sample().squeeze()
X = true_X + attack_t
losses[action] = losses[action] + (1 - X)/(gamma + P[action])
exp_losses[action] = exp_losses[action]*np.exp(-eta*(1 - X)/(gamma + P[action]))
sum_exp = np.sum(exp_losses)
rewards[t] = true_X
draws[t] = action
N[action] += 1
S[action] += true_X
return rewards, draws, attacks, time_of_attacks
def attacked_UCB1(T, MAB, target_arm, alpha=1., delta=0.99, constant_attack=False):
K = len(MAB)
rewards = np.zeros((T,))
draws = np.zeros((T,))
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,)) # sum of rewards for each arm
N_pre = np.ones((K,)) # number of observations of each arm
S_pre = np.zeros((K,))
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
for k in range(K):
a = k
r = MAB[a].sample()
rewards[k] = r
draws[k] = a
S[a] += r
N[a] += 1
S_pre[a] += r
N_pre[a] += 1
attacks[k] = 0
for t in range(K, T):
# select the arm
ucb = S / N + alpha * np.sqrt(np.log(t + 1) / N)
beta = np.sqrt(np.log(np.pi**2*K*N**2/(3*delta))/(2*N))
idxs = np.flatnonzero(np.isclose(ucb, ucb.max()))
        a = int(np.random.choice(idxs))
if a != target_arm:
time_of_attacks[t] = 1
if constant_attack:
attack_t = - 2 * np.maximum(0, MAB[a].mean - MAB[target_arm].mean)
else:
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2 * N))
attack_t = - np.maximum((S_pre / N)[a] - (S_pre / N)[target_arm] + beta[a] + beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
r = MAB[a].sample()
false_r = r + attack_t
# update quantities
rewards[t] = r
draws[t] = a
S[a] += false_r
N[a] += 1
S_pre[a] += r
N_pre[a] += 1
return rewards, draws, attacks, time_of_attacks
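# Usage sketch (illustrative): attacked_UCB1 additionally needs arms exposing a `mean`
# attribute (used by the constant attack), provided by the stub below. With the attack
# active, most pulls should be driven to `target_arm` even if it is the worst arm. All
# concrete values are assumptions local to this sketch.
def _demo_attacked_ucb1(T=2000, seed=0):
    rng = np.random.RandomState(seed)

    class _BernoulliStub(object):
        def __init__(self, p):
            self.mean = p

        def sample(self):
            return float(rng.rand() < self.mean)

    MAB = [_BernoulliStub(p) for p in (0.2, 0.5, 0.8)]
    rewards, draws, attacks, times = attacked_UCB1(T, MAB, target_arm=0, delta=0.99)
    return np.mean(draws == 0), np.abs(attacks).sum()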
def EXP3_P(T, MAB, eta=0.1, gamma=0):
K = len(MAB)
S = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
sum_exp = K
exp_S = np.ones((K,))
arms = np.linspace(0, K-1, K, dtype='int')
for t in range(T):
P = (1 - gamma) * exp_S / sum_exp + gamma / K * np.ones((K,))
if t < K:
action = t
attack_t = 0
else:
# print('Probability distribution:', P)
action = np.random.choice(arms, p=P)
X = 1*MAB[action].sample().squeeze()
S = S + 1
S[action] = S[action] - (1 - X)/P[action]
exp_S = exp_S*np.exp(eta)
exp_S[action] = exp_S[action]*np.exp(-eta *(1 - X)/P[action])
sum_exp = np.sum(exp_S)
rewards[t] = X
draws[t] = action
return rewards, draws
def attacked_EXP3_P(T, MAB, target_arm, eta = None, gamma = None, delta=0.99, constant_attack=False):
K = len(MAB)
estimated_S = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0 * rewards
sum_exp = K
exp_estimated_S = np.ones((K,))
arms = np.linspace(0, K - 1, K, dtype='int')
N = np.ones((K,)) # number of observations of each arm
S = np.zeros((K,))
beta = np.zeros((K,))
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
if eta is None and gamma is None:
eta = np.sqrt(np.log(K + 1) / (K * T))
gamma = 0
elif eta is None:
eta = np.sqrt(np.log(K + 1) / (K * T))
elif gamma is None:
gamma = 0
for t in range(T):
P = (1 - gamma) * exp_estimated_S / sum_exp + gamma/K*np.ones((K,))
if t < K:
action = t
attack_t = 0
else:
action = np.random.choice(arms, p=P)
if action != target_arm:
time_of_attacks[t] = 1
if constant_attack:
attack_t = - 2*np.maximum(0, MAB[action].mean - MAB[target_arm].mean)
else:
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2 * N))
attack_t = - np.maximum((S / N)[action] - (S / N)[target_arm] + beta[action] + beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
true_X = 1 * MAB[action].sample().squeeze()
X = true_X + attack_t
estimated_S = estimated_S + 1
estimated_S[action] = estimated_S[action] - (1 - X) /P[action]
exp_estimated_S = exp_estimated_S*np.exp(eta)
exp_estimated_S[action] = exp_estimated_S[action] * np.exp(eta * (- (1 - X) /P[action]))
sum_exp = np.sum(exp_estimated_S)
rewards[t] = true_X
draws[t] = action
N[action] += 1
S[action] += true_X
return rewards, draws, attacks, time_of_attacks
def FTRL(T, MAB, eta=10, alg='exp_3'):
K = len(MAB)
S = np.zeros((K,))
losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
arms = np.linspace(0, K-1, K, dtype='int')
for t in trange(T):
x = cp.Variable(K, pos=True)
temp_1 = cp.Constant(value=np.ones((K,)))
temp_2 = cp.Constant(value=losses)
constraints = [cp.sum(cp.multiply(temp_1, x)) == 1]
if alg == 'log_barrier':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 1/eta*cp.sum(cp.log(x)))
elif alg == 'inf':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 2/eta*cp.sum(cp.sqrt(x)))
else:
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) + 1/eta*(cp.sum(cp.kl_div(x, temp_1)) - K))
pb = cp.Problem(obj, constraints)
try:
pb.solve()
P = x.value
except:
P = np.ones((K,))/K
# print('Probability distribution:', P)
if not np.sum(P) == 1:
P = P/np.sum(P)
action = np.random.choice(arms, p=P)
X = 1*MAB[action].sample().squeeze()
S[action] = S[action] + X/P[action]
losses[action] = losses[action] + (-X)/P[action]
rewards[t] = X
draws[t] = action
return rewards, draws
def attacked_FTRL(T, MAB, target_arm, eta=10, alg='exp_3', delta=0.99, constant_attack=False):
K = len(MAB)
true_S = np.zeros((K,))
true_losses = np.zeros((K,))
N = np.zeros((K,))
estimated_losses = np.zeros((K,))
rewards = np.zeros((T,))
draws = 0*rewards
arms = np.linspace(0, K-1, K, dtype='int')
attacks = np.zeros((T,))
time_of_attacks = np.zeros((T,))
for t in trange(T):
x = cp.Variable(K, pos=True)
temp_1 = cp.Constant(value=np.ones((K,)))
temp_2 = cp.Constant(value=estimated_losses)
constraints = [cp.sum(cp.multiply(temp_1, x)) == 1]
if alg == 'log_barrier':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 1/eta*cp.sum(cp.log(x)))
elif alg == 'inf':
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) - 2/eta*cp.sum(cp.sqrt(x)))
else:
obj = cp.Minimize(cp.sum(cp.multiply(temp_2, x)) + 1/eta*(cp.sum(cp.kl_div(x, temp_1)) - K))
pb = cp.Problem(obj, constraints)
try:
pb.solve()
P = x.value
except:
P = np.ones((K,))/K
# print("\nThe optimal value is", pb.value)
# print("A solution x is")
# print(x.value)
# print("A dual solution corresponding to the inequality constraints is")
# print(pb.constraints[0].dual_value)
# print('Probability distribution:', P)
if not np.sum(P) == 1:
P = P/np.sum(P)
if t < K:
action = t
attack_t = 0
else:
action = np.random.choice(arms, p=P)
if action != target_arm:
time_of_attacks[t] = 1
beta = np.sqrt(np.log(np.pi ** 2 * K * N ** 2 / (3 * delta)) / (2 * N))
if constant_attack:
attack_t = - 2*np.maximum(0, MAB[action].mean - MAB[target_arm].mean)
else:
attack_t = - np.maximum((true_S / N)[action] - (true_S / N)[target_arm] + beta[action]
+ beta[target_arm], 0)
else:
attack_t = 0
attacks[t] = attack_t
true_X = 1*MAB[action].sample().squeeze()
X = true_X + attack_t
true_S[action] = true_S[action] + true_X
true_losses[action] = true_losses[action] + (1-true_X)/P[action]
estimated_losses[action] = estimated_losses[action] + (1 - X)/P[action]
N[action] = N[action] + 1
rewards[t] = true_X
draws[t] = action
return rewards, draws, attacks, time_of_attacks
| ContextualBanditsAttacks-main | isoexp/mab/smab_algs.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import math
from scipy.stats import truncnorm
class AbstractArm(object):
def __init__(self, mean, variance, random_state):
"""
Args:
mean: expectation of the arm
variance: variance of the arm
random_state (int): seed to make experiments reproducible
"""
self.mean = mean
self.variance = variance
self.local_random = np.random.RandomState(random_state)
def sample(self):
pass
class ArmTruncNorm():
def __init__(self, original_mean=0, a=-1, b=1, original_std=0.1):
a, b = (a - original_mean) / original_std, (b - original_mean) / original_std
self.a = a
self.b = b
self.true_sigma = original_std
self.true_mean = original_mean
        self.mean, self.sigma = truncnorm.stats(a=self.a, b=self.b, loc=self.true_mean, scale=self.true_sigma)  # note: truncnorm.stats returns (mean, variance), so self.sigma holds the variance
def sample(self):
return truncnorm.rvs(a=self.a, b=self.b, loc=self.true_mean, scale=self.true_sigma)
class ArmBernoulli(AbstractArm):
def __init__(self, p, random_state=0):
"""
Bernoulli arm
Args:
p (float): mean parameter
random_state (int): seed to make experiments reproducible
"""
self.p = p
super(ArmBernoulli, self).__init__(mean=p,
variance=p * (1. - p),
random_state=random_state)
def sample(self):
return self.local_random.rand(1) < self.p
class ArmBeta(AbstractArm):
def __init__(self, a, b, random_state=0):
"""
arm having a Beta distribution
Args:
a (float): first parameter
b (float): second parameter
random_state (int): seed to make experiments reproducible
"""
self.a = a
self.b = b
super(ArmBeta, self).__init__(mean=a / (a + b),
variance=(a * b) / ((a + b) ** 2 * (a + b + 1)),
random_state=random_state)
def sample(self):
return self.local_random.beta(self.a, self.b, 1)
class ArmExp(AbstractArm):
# https://en.wikipedia.org/wiki/Truncated_distribution
# https://en.wikipedia.org/wiki/Exponential_distribution
# http://lagrange.math.siu.edu/Olive/ch4.pdf
def __init__(self, L, B=1., random_state=0):
"""
        Exponential distribution with rate L, truncated to the interval [0, B].
Args:
L (float): parameter of the exponential distribution
B (float): upper bound of the distribution (lower is 0)
random_state (int): seed to make experiments reproducible
"""
assert B > 0.
self.L = L
self.B = B
v_m = (1. - np.exp(-B * L) * (1. + B * L)) / L
super(ArmExp, self).__init__(mean=v_m / (1. - np.exp(-L * B)),
variance=None, # compute it yourself!
random_state=random_state)
def cdf(self, x):
cdf = lambda y: 1. - np.exp(-self.L * y)
truncated_cdf = (cdf(x) - cdf(0)) / (cdf(self.B) - cdf(0))
return truncated_cdf
def inv_cdf(self, q):
assert 0 <= q <= 1.
v = - np.log(1. - (1. - np.exp(- self.L * self.B)) * q) / self.L
return v
def sample(self):
# Inverse transform sampling
# https://en.wikipedia.org/wiki/Inverse_transform_sampling
q = self.local_random.random_sample(1)
x = self.inv_cdf(q=q)
return x
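# Check sketch (illustrative): the inverse-transform sampler above should reproduce the
# closed-form truncated mean stored in self.mean; the parameters below are assumptions
# local to this sketch.
def _check_arm_exp_mean(n_samples=20000, seed=0):
    arm = ArmExp(L=2.0, B=1.0, random_state=seed)
    draws = np.array([arm.sample() for _ in range(n_samples)])
    return float(draws.mean()), arm.mean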
class ArmFinite(AbstractArm):
def __init__(self, X, P, random_state=0):
"""
Arm with finite support
Args:
X: support of the distribution
P: associated probabilities
random_state (int): seed to make experiments reproducible
"""
self.X = X
self.P = P
mean = np.sum(X * P)
super(ArmFinite, self).__init__(mean=mean,
variance=np.sum(X ** 2 * P) - mean ** 2,
random_state=random_state)
def sample(self):
i = self.local_random.choice(len(self.P), size=1, p=self.P)
reward = self.X[i]
return reward
class ArmNormal(AbstractArm):
def __init__(self, mu, sigma, random_state=0):
self.sigma = sigma
super(ArmNormal, self).__init__(mean=mu,
variance=sigma ** 2,
random_state=random_state)
def sample(self):
x = self.local_random.randn() * self.sigma + self.mean
return x
if __name__ == '__main__':
    arm = ArmTruncNorm(original_mean=-1, a=0, b=0.01)
print(arm.sample())
| ContextualBanditsAttacks-main | isoexp/mab/arms.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
__version__ = '0.0.dev0'
| ContextualBanditsAttacks-main | isoexp/mab/__init__.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path.append('/isoexp')
import numpy as np
import isoexp.mab.arms as arms
import pickle
from isoexp.mab.smab_algs import UCB1, EXP3_IX, attacked_UCB1, attacked_EXP3_IX, EXP3_P, attacked_EXP3_P, FTRL, attacked_FTRL
from matplotlib import rc
import json
import datetime
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
MABResults = namedtuple('MABResults', 'regret, cum_rewards, attacks, times_of_attacks')
random_state = np.random.randint(0, 123123)
K = 5
MAB = []
means = np.random.uniform(low=0.25, high=0.75, size=K)
#means = np.array([0.47823152, 0.70243227, 0.64504063, 0.65679234, 0.49546542,
# 0.46417188, 0.64736977, 0.71255566, 0.66844984, 0.26030838])
for k in range(K) :
#MAB.append(arms.ArmBeta(a=8*means[k], b=8*(1-means[k])))
MAB.append(arms.ArmBernoulli(p=means[k]))
nb_arms = len(MAB)
print('means: {}'.format(means))
mu_max = np.max(means)
a_star = np.argmin(means)  # target arm for the attacks: the worst arm
T = 1*10**4# horizon
nb_simu = 10
eta = np.sqrt(2*np.log(K + 1)/(K*T))
# eta = 0.01
gamma = eta/2
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
}
algorithms = {
#'EXP3': lambda T, MAB: FTRL(T, MAB, eta=eta, alg='epx_3'),
'INF': lambda T, MAB: FTRL(T, MAB, eta=eta, alg='inf'),
'Attacked INF': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta, alg='inf'),
# 'FTRL log barrier' : lambda T, MAB: FTRL(T, MAB, eta=eta, alg='log_barrier'),
# 'Attacked FTRL log barrier': lambda T, MAB: attacked_FTRL(T, MAB, target_arm=a_star, eta=eta, alg='log_barrier'),
'UCB': lambda T, MAB: UCB1(T, MAB, alpha=1),
'Attacked UCB': lambda T, MAB: attacked_UCB1(T, MAB, target_arm = a_star, alpha=1., delta=0.99),
# 'EXP3-IX': lambda T, MAB: EXP3_IX(T, MAB, eta=eta, gamma=gamma),
# 'Attacked EXP3-IX': lambda T, MAB: attacked_EXP3_IX(T, MAB, target_arm=a_star),
'EXP3': lambda T, MAB: EXP3_P(T, MAB, eta=np.sqrt(np.log(K)/(T*K))),
'Attacked EXP3': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star),
# 'EXP3.P Gamma 0.1': lambda T, MAB: EXP3_P(T, MAB, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T))),
# 'Attacked EXP3.P Gamma 0.1': lambda T, MAB: attacked_EXP3_P(T, MAB, target_arm=a_star, gamma=0.1, eta=np.sqrt(np.log(K)/(K*T)))
}
results = []
full_algo = algorithms
for alg_name in full_algo.keys():
alg = full_algo[alg_name]
regret = np.zeros((nb_simu, T))
rwds = 0*regret
times = 0*regret
attacks = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
try:
rewards, draws = alg(T, MAB)
except ValueError:
rewards, draws, att, times_of_att = alg(T, MAB)
attacks[k] = np.cumsum(att)
times[k] = times_of_att
rwds[k] = np.cumsum(means[draws.astype('int')])
regret[k] = max(means) * np.arange(1, T + 1) - rwds[k]
results += [(alg_name, MABResults(regret=regret, cum_rewards=rwds, attacks=attacks, times_of_attacks=times))]
id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
with open("{}_{}_MAB_illustration.pickle".format(id, "SEQ"), "wb") as f:
pickle.dump(results, f)
with open("{}_{}_MAB_illustration_settings.json".format(id, "SEQ"), "w+") as f:
json.dump(settings, f)
t = np.arange(0, T)
for alg_name, val in results:
mean_regret = np.mean(val.regret, axis=0)
low_quantile_regret = np.quantile(val.regret, 0.25, axis=0)
high_quantile_regret = np.quantile(val.regret, 0.75, axis=0)
rwds = np.mean(val.cum_rewards, axis=0)
low_quantile_rwds = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_rwds = np.quantile(val.cum_rewards, 0.75, axis=0)
plt.figure(1)
plt.title('Rewards')
plt.plot(rwds, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_rwds, high_quantile_rwds, alpha=0.15)
plt.figure(2)
plt.title('Regret')
plt.plot(mean_regret, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_regret, high_quantile_regret, alpha=0.15)
if 'Attacked' in alg_name:
plt.figure(3)
cum_sum_attacks = np.mean(np.abs(val.attacks), axis=0)
low_quantile_attacks = np.quantile(np.abs(val.attacks), 0.25, axis=0)
high_quantile_attacks = np.quantile(np.abs(val.attacks), 0.75, axis=0)
plt.title('Cumulative sum of attacks')
plt.plot(cum_sum_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile_attacks, high_quantile_attacks, alpha=0.15)
# plt.figure(2)
# rep = np.random.randint(low=0, high=nb_simu)
# times_to_consider = val.times_of_attacks[rep]
# plt.scatter(t[times_to_consider == 1], val.regret[rep, times_to_consider == 1])
plt.figure(4)
plt.title('Number of attacks')
number_of_attacks = np.mean(np.cumsum(val.times_of_attacks, axis=1), axis=0)
high_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.75, axis=0)
low_quantile = np.quantile(np.cumsum(val.times_of_attacks, axis=1), 0.25, axis=0)
plt.plot(number_of_attacks, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile, high_quantile, alpha=0.15)
plt.show()
#import tikzplotlib
#tikzplotlib.save("lcb_worst.tex")
| ContextualBanditsAttacks-main | isoexp/mab/main_mab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 3 23:30:58 2019
@author: evrardgarcelon
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import shutil
import sys
from cycler import cycler
import tarfile
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = '20190822_065452_Bernoulli_PAR_martingale_results.pickle'
filename = '20190901_124136_linear_PAR_linear_results.pickle'
filename = '20190902_135139_linear_PAR_linear_results.pickle'
filename = '20190903_233609_linear_PAR_jester_results.pickle'
filename = '20190903_235606_linear_PAR_jester_results.pickle'
filename = '20190904_010618_linear_PAR_jester_results.pickle'
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Opening file %s..." % filename)
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
print("Done.\n")
EVERY = 20
LW = 2
print("Generating all figures ...")
bad_model = None
min_val = np.inf
total_experiments = {}
avg_area = {}
avg_margin = {}
for m, model in enumerate(results):
cucb_M, cucb_H = 0, 0
plt.figure()
ymax = -np.inf
T = None
for alg_name, val in model[1]:
if alg_name not in total_experiments.keys():
total_experiments[alg_name] = []
avg_area[alg_name] = []
avg_margin[alg_name] = []
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
regret = np.cumsum(val['regret'], axis=1)
total_experiments[alg_name] += regret.tolist()
margin = val['cum_rewards'].cumsum(axis=1)
area = np.sum(margin * (margin < 0), axis=1).mean()
print('min_margin(', alg_name, ')=', margin.min())
print('area(', alg_name, ')=', area)
print()
avg_area[alg_name] += [area]
avg_margin[alg_name] += margin.tolist()
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}.png".format(m)))
tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
plt.close()
print("Done.\n")
avg_regret_name = os.path.join(folder, "avg_regret.png")
print("Saving average regret to %s..." % avg_regret_name)
ymax = -np.inf
TOSAVE = {}
for alg_name in total_experiments.keys():
regret = np.array(total_experiments[alg_name])
rep, T = regret.shape
t = np.arange(1, T + 1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY],
mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
M = np.concatenate((t.reshape(-1, 1), mean_regret.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
np.savez_compressed(os.path.join(folder, "avg_regret"), **TOSAVE)
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
plt.savefig(avg_regret_name)
plt.close()
print("Done.\n")
for alg_name in avg_area.keys():
print("AverageAREA({}): {}".format(alg_name, np.mean(avg_area[alg_name])))
avg_margin_name = os.path.join(folder, "avg_margin.png")
print("Saving average margin to %s..." % avg_margin_name)
plt.figure(figsize=(10,10))
ymax = -np.inf
maxt = -np.inf
TOSAVE = {}
for alg_name in avg_margin.keys():
margin = np.array(avg_margin[alg_name])
rep, T = margin.shape
t = np.arange(1, T + 1)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
idxs = mean_margin < 10
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymax = max(ymax, mean_margin[-1] + 2 * std[-1])
maxt = max(maxt, np.max(t[idxs]))
M = np.concatenate((t.reshape(-1,1), mean_margin.reshape(-1,1), std.reshape(-1,1)), axis=1)
TOSAVE[alg_name] = M
plt.xlim([0, maxt])
# plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
plt.savefig(avg_margin_name)
plt.close()
np.savez_compressed(os.path.join(folder, "avg_margin"), **TOSAVE)
print("Done.\n")
# worst_name = os.path.join(folder, "worst_linear_exp.tex")
# print("Saving worst model to %s..." % worst_name)
# tikzplotlib.save(worst_name)
# print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
# # select "bad" model
# plt.figure(1,figsize=(10, 10))
# plt.figure(1)
# plt.clf()
# plt.title('Regret')
# ymax = -np.inf
# T = None
# for alg_name, res in results:
# val = res[0][1]
# print(alg_name)
# rep, T = val['cum_rewards'].shape
# t = np.arange(1, T + 1)
# regret = np.cumsum(val['regret'], axis=1)
# mean_regret = np.mean(regret, axis=0)
# std = np.std(regret, axis=0) / np.sqrt(rep)
# plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
# alpha=0.15)
# ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
#
# plt.xlim([0, T])
# plt.ylim([0, ymax])
# plt.legend()
# plt.savefig(os.path.join(folder, "real_data.png"))
# print("Done.\n")
#
# worst_name = os.path.join(folder, "real_data.tex")
# print("Saving worst model to %s..." % worst_name)
# tikzplotlib.save(worst_name)
#
# plt.figure(2,figsize=(10, 10))
# plt.figure(2)
# plt.clf()
# plt.title('Margin')
# ymax = -np.inf
# max_time = 1000
# T = None
# for alg_name, res in results:
# val = res[0][1]
# rep, T = val['cum_rewards'].shape
# t = np.arange(1, T + 1)
# margin = val['cum_rewards'].cumsum(axis = 1)
# print(alg_name, '=', margin.min())
# mean_margin = np.mean(margin, axis=0)
# std = np.std(margin, axis=0) / np.sqrt(rep)
# plt.plot(t[:max_time:EVERY], mean_margin[:max_time:EVERY], linewidth=LW, label=alg_name)
# plt.fill_between(t[:max_time:EVERY], mean_margin[:max_time:EVERY] - 2 * std[:max_time:EVERY], mean_margin[:max_time:EVERY] + 2 * std[:max_time:EVERY],
# alpha=0.15)
# #ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
#
# plt.xlim([0, max_time])
# #plt.ylim([0, ymax])
# plt.legend()
# plt.savefig(os.path.join(folder, "real_data_margin.png"))
# print("Done.\n")
#
# worst_name = os.path.join(folder, "real_data.tex")
# print("Saving worst model to %s..." % worst_name)
# tikzplotlib.save(worst_name)
# print("Done.\n")
#
# archive_name = "{}.tar.gz".format(folder)
# print("Compressing files to %s..." % archive_name)
# tardir(folder, archive_name)
# print("Done.\n")
#
# plt.show()
| ContextualBanditsAttacks-main | examples/parse_real_data_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from isoexp.monenvs import Env1
from sklearn.isotonic import IsotonicRegression
import matplotlib.pyplot as plt
import math
np.random.seed(521524)
from sklearn.utils import check_random_state
from isoexp._samplers import isotonic_data_bands
def paper_f(X, sigma):
v = 20 * X / 0.4 - (10 + 0.3 * 20 / 0.4)
v[X <= 0.3] = -10
v[X > 0.7] = 10
return v + sigma * np.random.randn(len(X))
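# Hedged, illustrative helper (not used anywhere below; the probe points are made up):
# with sigma=0 the piecewise-linear ramp of paper_f maps 0.3 to -10 and 0.7 to +10,
# so evaluating it at [0.1, 0.5, 0.9] should return approximately [-10., 0., 10.].
def _example_paper_f():
    return paper_f(np.array([0.1, 0.5, 0.9]), sigma=0.0)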
env = Env1()
N = 450
sigma = 0.5
X = np.random.rand(N)
X = np.sort(X)
Y = env.f[0](X) + sigma * np.random.randn(N)
Y = paper_f(X, sigma=sigma)
# Y = (10*X)**3/ 3.4 + sigma * np.random.randn(N)
# X = np.random.rand(N)
# X = np.sort(X)
# rs = check_random_state(312312)
# Y = rs.randint(-10, 10, size=(N,)) + 10. * np.log1p(np.arange(N))
L = 20/0.4
# L = 3 * 100
plt.plot(X, Y, 'o', label="Y")
idx_vector = np.arange(N)
ir = IsotonicRegression()
ir = ir.fit(X, Y)
Y_iso = ir.transform(X)
plt.plot(X, Y_iso, '-d', label="iso(Y)")
plt.legend()
T = np.linspace(0.001, 0.999, 50)
f = ir.predict(T)
f[T < X[0]] = Y_iso[0]
f[T > X[-1]] = Y_iso[-1]
delta = 0.1
# for idx in range(len(T)):
# X_new = T[idx]
# if X_new < X[0]:
# lb = -L * np.abs(X_new - X[0]) - np.sqrt(2*np.log((N**2 + N)/delta))
# lbm = 1
# m = 1
# ub = np.inf
# while m <= N:
# val = np.mean(Y_iso[0:m]) - f[idx] + np.sqrt(2*np.log((N**2 + N)/delta) / m)
# if val < ub:
# ub = val
# ubm = m
# m += 1
# elif X_new > X[-1]:
# ub = L * np.abs(X_new - X[-1]) + np.sqrt(np.log(2*(N**2 + N)/delta))
# ubm = 1
# m = 1
# while m <= N:
# val = np.mean(Y_iso[N-m:N]) - f[idx] - np.sqrt(np.log(2*(N**2 + N)/delta) / m)
# if val > lb:
# lb = val
# lbm = m
# m += 1
# else:
#
# k = np.max(idx_vector[(X_new > X)]) + 1
# assert k == (np.sum(X_new > X)), "{},{}".format(k, (np.sum(X_new > X)))
# m = 1
# mtop = max(k, N - k)
# ub = np.inf
# lb = -np.inf
# while m <= mtop:
# if m <= k:
# val = np.mean(Y_iso[k - m:k + 1]) - f[idx] - np.sqrt(np.log(2*(N**2 + N)/delta) / m)
# if val > lb:
# lb = val
# lbm = m
#
# if m <= N - k:
# val = np.mean(Y_iso[k:k + m]) - f[idx] + np.sqrt(np.log(2*(N**2 + N)/delta) / m)
#
# if val < ub:
# ub = val
# ubm = m
# m += 1
#
# print(X_new, lbm, lb, ub, ubm)
# plt.plot(X_new, f[idx] + ub, 'r+')
# plt.plot(X_new, f[idx] + lb, 'g*')
# idx = N - 7
# print("N: {}".format(N))
# print(T[idx], f[idx])
# lb, ub = isotonic_data_bands(X, Y, T[idx], f[idx], L, sigma, delta)
# print()
# print(lb, ub)
# exit(312)
LUCB = np.zeros((len(T), 4))
plt.figure()
plt.plot(T, f, ':+', label="iso(t)")
plt.plot(X, Y_iso, 'o', label="iso(Y)")
for idx in range(len(T)):
X_new = T[idx]
y_new = f[idx]
lb, ub = isotonic_data_bands(X, Y_iso, X_new, y_new, L, sigma, delta)
LUCB[idx, 0:2] = [lb, ub]
# plt.plot(X_new, y_new + ub, 'r+', label="ub-iso(Y)")
# plt.plot(X_new, y_new + lb, 'g*', label="lb-iso(Y)")
# plt.figure()
# plt.plot(T, f, '+')
for idx in range(len(T)):
X_new = T[idx]
y_new = f[idx]
lb, ub = isotonic_data_bands(X, Y, X_new, y_new, L, sigma, delta)
LUCB[idx, 2:4] = [lb, ub]
# plt.plot(X_new, y_new + ub, 'r*', label="ub-Y")
# plt.plot(X_new, y_new + lb, 'g-', label="lb-Y")
print(LUCB)
plt.plot(T, f + LUCB[:,0], 'g*', label="lb-iso(Y)")
plt.plot(T, f + LUCB[:,1], 'r+', label="ub-iso(Y)")
plt.plot(T, f + LUCB[:,2], 'b4', label="lb-Y")
plt.plot(T, f + LUCB[:,3], 'ko', label="ub-Y", fillstyle='none')
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/show_confidence.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 14:23:15 2019
@author: evrardgarcelon
"""
import numpy as np
import pylab as plt
x = np.linspace(0,100)
y = np.linspace(0,50)
c_1 = 1
a = np.array([1,-1])
f = lambda c,d : c_1*np.sqrt(c+d) + a[0]*c + a[1]*d
X,Y = np.meshgrid(x,y)
res = f(X,Y)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, res)  # plot_surface expects the 2-D meshgrid arrays, not the 1-D x and y
plt.show() | ContextualBanditsAttacks-main | examples/untitled0.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import datetime
import json
import logging
import os
import pickle
from collections import namedtuple
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
import sys
from math import sqrt
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
from isoexp.contextual.contextual_linucb import ContextualLinearTS
from isoexp.contextual.contextual_models import DatasetModel, RandomContextualLinearArms
from isoexp.mab import contextual_arms
from isoexp.mab.contextual_mab_algs import contextEpsGREEDY, ContextualLinearBandit
import quadprog
from scipy.linalg import sqrtm
from scipy.optimize import minimize, linprog
import cvxpy as cp
from scipy import stats
from math import log
"""
TEST Linear Bandit
"""
logging_period = 1000
def compute_regret(theta, context, a_t):
D = np.dot(theta, context)
return np.max(D) - D[a_t]
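# Hedged, illustrative sketch (not called by the experiment; the helper name and the
# numbers are made up): on a toy 2-arm / 2-feature problem with expected rewards
# [0.8, 0.2], pulling arm 1 yields an instantaneous regret of 0.6.
def _example_compute_regret():
    toy_theta = np.array([[1.0, 0.0], [0.0, 1.0]])  # one parameter row per arm
    toy_context = np.array([0.8, 0.2])
    return compute_regret(toy_theta, toy_context, a_t=1)  # == 0.6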
def work(nb_arms, noise, n_features, T, random_state, attack_frequency, alg_name, weak_attack=False,
adversarial_xi=0.00001, method=None, sparse_attacks=False, simulator=None, target_arm=None, x_star=None, delta=0.99, reg_factor=0.1):
# create model
print(
f"adversarial {attack_frequency}, xi {adversarial_xi}, weak_attack {weak_attack} method {method}")
local_random = np.random.RandomState(random_state)
if simulator is None:
raise ValueError('No simulator')
# # real_theta = np.random.randn(nb_arms, n_features)
# # real_theta = np.random.uniform(low=1 / 2, high=3) * real_theta / np.linalg.norm(real_theta)
#
# # simulator = contextual_arms.ContextualLinearMABModel(theta=real_theta, noise=noise, random_state=local_random)
# simulator = RandomContextualLinearArms(n_actions=nb_arms, n_features=n_features, noise=noise, bound_context=1)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.thetas, x_star)
#
# target_arm = np.argmin(means_x_star)
# # print('the simulator is {}'.format(simulator))
simulator.local_random = local_random
all_rewards = np.dot(simulator.context_lists, simulator.thetas.T)
regret = []
rewards = []
norms = []
attacks = []
relative_attack_norm = []
contexts_norms = []
successful_attack = []
failed_attack = []
iteration = []
cumulative_regret = []
ratio_successful_attacks = []
sum_attacks_norms = []
nb_attacks_list = []
inv_design_worst_ratio = []
alg_names = []
biases = []
target_arm_chosen_list = []
target_arm_chosen_count = 0
    x_star_appeared = 0
    a_star_in_x_star = 0
    a_star_in_x_star_list = []
    x_star_appeared_list = []
    TS_attacker = None
if alg_name == 'eps_greedy':
alg = contextEpsGREEDY(number_arms=simulator.n_actions, dimension=simulator.n_features, decrease_epsilon=True)
elif alg_name == 'LinUCB':
alg = ContextualLinearBandit(nb_arms=simulator.n_actions, dimension=simulator.n_features,
reg_factor=reg_factor, delta=delta,
bound_features=np.max(np.linalg.norm(simulator.thetas, axis=1)),
noise_variance=noise, bound_context=simulator.bound_context)
elif alg_name == 'LinTS':
alg = ContextualLinearTS(nb_arms=simulator.n_actions, dimension=simulator.n_features,
reg_factor=reg_factor,
delta=delta,
noise_variance=noise/5)
TS_attacker = TS_relaxed_attacks_calculator(simulator, alg, T)
else:
raise ValueError(f'Unknown alg_name {alg_name}')
cumulative_regret_t = 0
n_successful_attacks = 0
n_failed_attacks = 0
attack_sums = 0
nb_attacks = 0
for t in tqdm(range(T)):
context = simulator.get_context()
context_norm = norm(context)
if attack_frequency == 'target_arm':
is_attacked = is_equal(context, x_star)
else:
attack_proba = 1 / sqrt(t + 1) if attack_frequency == 'decrease_sqrt' else attack_frequency
is_attacked = local_random.rand() < attack_proba
if is_attacked:
predicted_best_arm = alg.get_action(context, deterministic=True)
if sparse_attacks:
# true_best_arm = np.argmax(simulator.theta.dot(context))
if predicted_best_arm == target_arm:
# print("no need to attack")
n_successful_attacks += 1
attack = 0
attack_norm = 0
else:
n_failed_attacks += 1
attack = compute_long_term_attack(simulator, predicted_best_arm, context, target_arm, all_rewards, factor=sparse_attacks)
attack_norm = norm(attack)
else:
estimated_rewards = alg.thetas.dot(context)
if weak_attack:
attack, attack_norm, attack_succeeded = compute_weak_attack(adversarial_xi, alg, predicted_best_arm,
context,
estimated_rewards, nb_arms)
else:
attack, attack_norm, attack_succeeded = compute_strong_attack(adversarial_xi, alg,
context,
estimated_rewards, method,
simulator.n_features,
simulator=simulator, target_arm=target_arm, x_star=x_star, attacker=TS_attacker)
if attack_norm == float('inf'):
attack = 0
attack_norm = 0
else:
attack_norm = 0
attack = 0
if attack_norm > 0:
nb_attacks += 1
if attack_norm < float('inf'):
attack_sums += attack_norm
attacked_context = context + attack
a_t = alg.get_action(attacked_context)
if is_attacked and not sparse_attacks:
if attack_succeeded:
assert t <= nb_arms or 0 < attack_norm < float(
'inf'), 'The attack is seen as successful but is zero or of infinite norm, the attack was {}'.format(
attack)
n_successful_attacks += 1
else:
n_failed_attacks += 1
r_t = simulator.reward(context, a_t)
regret_t = compute_regret(simulator.theta, context, a_t)
alg.update(attacked_context, a_t, r_t)
if is_equal(context, x_star):
x_star_appeared += 1
if a_t == target_arm:
a_star_in_x_star+=1
cumulative_regret_t += regret_t
if a_t == target_arm:
target_arm_chosen_count +=1
if t % logging_period == 0:
bias = (r_t - alg.thetas[a_t].dot(attacked_context)) / r_t
norm_error = np.linalg.norm(alg.thetas - simulator.theta, 2)
# logging
worst_ratio = None
for inv_a in alg.inv_design_matrices:
for i, col in enumerate(inv_a):
ratio = abs(max(col) / col[i])
if worst_ratio is None or worst_ratio < ratio:
worst_ratio = ratio
inv_design_worst_ratio.append(worst_ratio)
regret.append(
regret_t) # simulator.best_expected_reward(context) - simulator.expected_reward(action=a_t, context=context)
norms.append(norm_error)
rewards.append(r_t)
attacks.append(attack_norm)
iteration.append(t)
relative_attack_norm.append(norm(attacked_context) / context_norm)
contexts_norms.append(context_norm)
cumulative_regret.append(cumulative_regret_t)
ratio_successful_attacks.append(n_successful_attacks / (
n_failed_attacks + n_successful_attacks) if n_failed_attacks + n_successful_attacks else 0)
successful_attack.append(n_successful_attacks)
failed_attack.append(n_failed_attacks)
sum_attacks_norms.append(attack_sums)
nb_attacks_list.append(nb_attacks)
alg_names.append(alg_name)
biases.append(bias)
x_star_appeared_list.append(x_star_appeared)
target_arm_chosen_list.append(target_arm_chosen_count)
a_star_in_x_star_list.append(a_star_in_x_star)
logging.info(f"Iteration {t}, regret {regret_t}, reward{r_t}, norm error {norm_error}")
return {'iteration': iteration, "regret": regret, 'cumulative_regret': cumulative_regret, "rewards": rewards,
"norm_errors": norms, "attacks": attacks, 'target_arm_chosen': target_arm_chosen_list,
"relative_attack_norm": relative_attack_norm, 'contexts_norms': contexts_norms,
'successful_attack': ratio_successful_attacks, 'xi': adversarial_xi, 'biases': biases,
'attack_frequency': attack_frequency, 'sum_attacks_norms': sum_attacks_norms, 'weak_attack': weak_attack,
'x_star_appearances':x_star_appeared_list, 'a_star_in_x_star': a_star_in_x_star_list,
'method': method, 'sparse_attacks': sparse_attacks, "nb_attacks": nb_attacks_list,
'n_successful_attack': successful_attack, 'n_failed_attack': failed_attack,
'design_mat_worse_ratio': inv_design_worst_ratio, 'alg_names': alg_names}, simulator
def is_equal(context, x_star):
if x_star is None:
return False
return norm(context - x_star) < 1e-8
def compute_short_attack_linUCB(dimension, alg, a_star, x_star, slack=10 ** -5, relaxed=False):
func = lambda delta: np.linalg.norm(delta)/2
theta_a_star = alg.thetas[a_star]
P_a_star = sqrtm(alg.inv_design_matrices[a_star])
betas = alg.alpha()
constraints_list = []
for a in range(len(alg.thetas)):
if a != a_star:
theta = alg.thetas[a]
P = sqrtm(alg.inv_design_matrices[a])
if not(relaxed):
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta)))
- beta_a_star * np.linalg.norm(P_a_star.dot((x_star + delta))) + slack)
else:
temp_constraint = lambda delta, P=P, P_a_star=P_a_star, theta=theta, theta_a_star=theta_a_star, beta=betas[a], beta_a_star=betas[a_star]: \
-((theta - theta_a_star).dot((x_star + delta)) + beta * np.linalg.norm(P.dot((x_star + delta))) + slack)
temp_cons = {'type': 'ineq', 'fun': temp_constraint}
constraints_list.append(temp_cons)
cons = tuple(constraints_list)
res = minimize(func, -x_star, method='SLSQP', constraints=cons)
# print(res.message)
try:
epsilon = res.x
except KeyboardInterrupt:
raise
except:
epsilon = np.zeros((dimension,))
# print('Epsilon =', epsilon)
# for a in range(len(constraints_list)):
# theta = alg.thetas[a]
# P = sqrtm(alg.inv_design_matrices[a])
# print('Constraints for arm {}'.format(a), constraints_list[a]['fun'](x_star + epsilon))
if epsilon is None:
return np.zeros((dimension,)), 0, False
return epsilon, norm(epsilon), norm(epsilon) > 0
def compute_relaxed_attack(dimension, alg, a_star, x_star, slack=10 ** -5):
delta = cp.Variable(dimension)
obj = cp.Minimize(cp.quad_form(delta, np.eye(dimension))/2)
theta_a_star = alg.thetas[a_star]
betas = alg.alpha()
constraints = []
for a in range(len(alg.thetas)):
if a != a_star:
theta = alg.thetas[a]
P = sqrtm(alg.inv_design_matrices[a])
temp_constraint = (theta - theta_a_star) @ (x_star + delta) + betas[a] * cp.norm2(P @ (x_star + delta))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
out = prob.solve(solver='SCS', max_iters=10000,)
epsilon = delta.value
except KeyboardInterrupt:
raise
except:
print('Exception')
epsilon = np.zeros((dimension,))
if epsilon is None:
return np.zeros((dimension,)), 0, False
# if norm(epsilon > 0):
# margin = (theta - theta_a_star) @ (x_star + epsilon) + betas[a] * np.linalg.norm(np.dot(np.array(sqrtm(alg.inv_design_matrices[a])) ,(x_star + epsilon))) #np.sqrt(np.dot(x_star + epsilon, alg.inv_design_matrices[a] @ (x_star + epsilon)))
# # print(f'the margin is {margin}')
# if margin > 0 :
# print('the margin was negative, {}, norm eps {}, norm x {}'.format(out, norm(epsilon), norm(x_star)))
return epsilon, norm(epsilon), norm(epsilon) > 0
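# Hedged, self-contained sketch (the _MockAlg class below is an assumption, not part of
# the original code: it only mimics the two attributes and the alpha() method that
# compute_relaxed_attack reads). It shows the inputs expected by the SOCP above on a
# tiny 2-arm, 2-dimensional instance where arm 0 looks best at x_star and we ask for
# the smallest perturbation that makes arm 1 the optimistic winner instead.
def _example_relaxed_attack():
    class _MockAlg(object):
        thetas = np.array([[1.0, 0.0], [0.0, 1.0]])
        inv_design_matrices = [0.1 * np.eye(2), 0.1 * np.eye(2)]

        def alpha(self):
            return np.array([0.5, 0.5])  # confidence widths, one per arm

    x_star = np.array([1.0, 0.0])
    return compute_relaxed_attack(dimension=2, alg=_MockAlg(), a_star=1, x_star=x_star)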
class TS_relaxed_attacks_calculator:
def __init__(self, simulator, alg, T):
delta_zero = 0.95
sigma = alg.sigma
nu = sigma * 3 * sqrt(simulator.n_features * log(T/delta_zero))
self.cste = nu * stats.norm.ppf(1 - delta_zero / (simulator.thetas.shape[0] -1))
def compute_relaxed_attack(self, dimension, alg, a_star, x_star, slack=10 ** -5):
delta = cp.Variable(dimension)
obj = cp.Minimize(cp.quad_form(delta, np.eye(dimension))/2)
theta_a_star = alg.thetas[a_star]
constraints = []
for a in range(len(alg.thetas)):
if a != a_star:
theta = alg.thetas[a]
P = sqrtm(alg.inv_design_matrices[a] + alg.inv_design_matrices[a_star])
temp_constraint = (theta - theta_a_star) @ (x_star + delta) + self.cste * cp.norm(P @ (x_star + delta))
constraints.append(temp_constraint + slack <= 0)
prob = cp.Problem(obj, constraints)
try:
prob.solve()#(feastol=1e-11, feastol_inacc=1e-11)
epsilon = delta.value
except KeyboardInterrupt:
raise
except:
print('Exception')
epsilon = np.zeros((dimension,))
# print('epsilon =', epsilon)
if epsilon is None:
return np.zeros((dimension,)), 0, False
return epsilon, norm(epsilon), norm(epsilon) > 0
def compute_strong_attack(adversarial_xi, alg, context, estimated_rewards, method, n_features, simulator, target_arm, x_star, attacker=None):
# worst_arm = np.argmin(estimated_rewards)
if method == 'linUCB_Relaxed':
alg.get_action(context)
attack, attack_norm, attack_succeeded = compute_relaxed_attack(simulator.n_features, alg, target_arm, context, slack=10 ** -9)
# attack, attack_norm, attack_succeeded = compute_short_attack_linUCB(simulator.n_features, alg, target_arm, x_star, slack=10 ** -10 , relaxed=False)
# if attack_succeeded:
# print(f'attack succeeded {attack_norm}')
# new_chosen = alg.get_action(context + attack, deterministic=True)
# if new_chosen != target_arm:
# new_context = context + attack
# print(f'algorithm chose arm {new_chosen} instead of {target_arm}')
# print(
# f'the scores were {alg.thetas[target_arm].dot(new_context) + alg.alpha()[target_arm] * np.sqrt(np.dot(new_context, np.dot(alg.inv_design_matrices[target_arm], new_context)))} vs {alg.thetas[new_chosen].dot(new_context) + alg.alpha()[new_chosen] * np.sqrt(np.dot(new_context, np.dot(alg.inv_design_matrices[new_chosen], new_context)))}, {norm(context+attack)}')
# print(
# f'with just attack the scores were {alg.thetas[target_arm].dot(attack)} vs {alg.thetas[new_chosen].dot(attack)}')
# # raise ValueError('Wrong chosen arm')
return attack, attack_norm, attack_succeeded
elif method == 'linUCB_full':
return compute_short_attack_linUCB(simulator.n_features, alg, target_arm, context, slack=10 ** -3, relaxed=False)
elif method == 'TS_Relaxed':
assert(attacker is not None), "Should pass an attacker to attack LinTS"
return attacker.compute_relaxed_attack(simulator.n_features, alg, target_arm, context, slack=10 ** -10)
elif method == 'quadprog':
try:
attack = quadprog_solve_qp(n_features, thetas=alg.thetas, arm_to_select=target_arm, context=context,
adversarial_xi=10**-5)
attack_norm = norm(attack)
attack_succeeded = True
except ValueError:
attack = 0
attack_norm = float('inf')
attack_succeeded = False
return attack, attack_norm, attack_succeeded
# elif method == 'heuristic':
# best_arm = np.argmax(estimated_rewards)
# attack, attack_norm = compute_attack_for_arm(worst_arm, best_arm, alg.thetas, estimated_rewards,
# adversarial_xi)
# attack_succeeded = alg.estimated_best_arm(context + attack) == worst_arm
else:
assert False, f'Unkown method for targeted attacks: {method}'
return attack, attack_norm, attack_succeeded
def compute_long_term_attack(simulator, action, context, a_star, all_rewards, slack=10 ** -3, factor=2):
if action != a_star: # and np.linalg.norm(context - x_star) < 10**-5:
# worst_ratio1 = (all_rewards[:, action] / all_rewards[:, a_star]).max()
worst_ratio = 1 / all_rewards[:, a_star].min()
# print(worst_ratio1, worst_ratio)
# assert(worst_ratio1 <= worst_ratio), 'there is a reward that is greater than 1'
delta = factor * worst_ratio
# print(f'delta: {delta}')
delta = max(delta, 1)
# delta = np.maximum(2*np.dot(model.thetas[action], x_star)/np.dot(model.thetas[a_star], x_star), 1)
epsilon = (delta - 1) * context
return epsilon
else:
return np.zeros((simulator.n_features,))
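# Hedged, illustrative sketch (the _Sim stand-in and all numbers are made up; only the
# n_features attribute of the simulator is ever read by compute_long_term_attack).
# The attack simply rescales the context: with factor=2 and a minimum target-arm reward
# of 0.2, delta = 2 / 0.2 = 10 and the attacked context is 10 * context.
def _example_long_term_attack():
    class _Sim(object):
        n_features = 2

    ctx = np.array([0.3, 0.4])
    all_r = np.array([[0.5, 0.2], [0.9, 0.4]])  # rewards, one row per context, one column per arm
    eps = compute_long_term_attack(_Sim(), action=0, context=ctx, a_star=1,
                                   all_rewards=all_r, factor=2)
    return ctx + eps  # == 10 * ctx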
def compute_weak_attack(adversarial_xi, alg, best_arm, context, estimated_rewards, nb_arms):
possible_attacks = [compute_attack_for_arm(arm, best_arm, alg.thetas, estimated_rewards, adversarial_xi) for arm
in range(nb_arms) if arm != best_arm]
attack, attack_norm = possible_attacks[np.argmin([att[1] for att in possible_attacks])]
attack_succeeded = alg.estimated_best_arm(context + attack) != alg.estimated_best_arm(context)
return attack, attack_norm, attack_succeeded
def norm(vector):
return np.linalg.norm(vector, 2)
def compute_attack_for_arm(chosen_arm, best_arm, thetas, estimated_rewards, adversarial_xi):
    attack_direction = thetas[chosen_arm] - thetas[best_arm]
    # squared norm of the direction (note: this local name shadows the module-level norm())
    norm = attack_direction.dot(attack_direction)
    if norm == 0:
        return 0, float('inf')
    # scaling that makes the chosen arm's estimated reward exceed the best arm's by adversarial_xi;
    # this is the coefficient gap / squared direction norm, not the Euclidean norm of the attack
    attack_norm = (estimated_rewards[best_arm] - estimated_rewards[chosen_arm] + adversarial_xi) / norm
    attack = attack_norm * attack_direction
    return attack, attack_norm
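# Hedged, worked example (not called anywhere; the parameters are made up): after adding
# the returned perturbation, the chosen arm's estimated reward exceeds the best arm's by
# exactly adversarial_xi under the linear estimates used here.
def _example_attack_for_arm():
    thetas = np.array([[1.0, 0.0], [0.0, 1.0]])
    context = np.array([0.9, 0.1])
    estimated_rewards = thetas.dot(context)  # [0.9, 0.1]: arm 0 is the estimated best
    attack, coeff = compute_attack_for_arm(chosen_arm=1, best_arm=0, thetas=thetas,
                                           estimated_rewards=estimated_rewards,
                                           adversarial_xi=0.01)
    gap = thetas[1].dot(context + attack) - thetas[0].dot(context + attack)
    return attack, gap  # gap == 0.01 up to floating point error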
def generate_context(n_features, low=-3, high=3):
context = np.random.randn(n_features)
context = np.random.uniform(low=low, high=high) * context / np.linalg.norm(context)
return context
def quadprog_solve_qp(n_features, thetas, arm_to_select, context, adversarial_xi):
qp_G = np.identity(n_features) # make sure P is symmetric
qp_a = np.zeros_like(context)
# no equality constraint
constraints_lines = np.delete(thetas - thetas[arm_to_select], arm_to_select, axis=0)
qp_C = - constraints_lines.T
qp_b = constraints_lines.dot(context) + adversarial_xi
meq = 0
return quadprog.solve_qp(qp_G, qp_a, qp_C, qp_b, meq)[0]
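# Hedged usage sketch (the arm parameters below are made up): quadprog_solve_qp returns
# the minimum-norm context perturbation delta such that the selected arm dominates every
# other arm by at least adversarial_xi under the current linear estimates.
def _example_quadprog_attack():
    thetas = np.array([[1.0, 0.0], [0.0, 1.0], [0.5, 0.5]])
    context = np.array([0.9, 0.1])
    delta = quadprog_solve_qp(n_features=2, thetas=thetas, arm_to_select=1,
                              context=context, adversarial_xi=0.01)
    return thetas.dot(context + delta)  # the second entry is now the largest one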
# if __name__ == '__main__':
#
# PARALLEL = True
# print("PARALLEL: {}".format(PARALLEL))
#
# MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
#
# random_state = np.random.randint(0, 123123)
# np.random.seed(random_state)
# local_random = np.random.RandomState(random_state)
#
# print("seed: {}".format(random_state))
#
# K = 10
# n_features = 30
# a_noise = 0.1
#
# T = 5 * 10 ** 6 # horizon
# nb_simu = 15 # 48 * 5 #240
# adversarial_xi = 0.0001
#
# attack_frequencies = [1.0, 0.0] # [1.0, 'decrease_sqrt', 0]
# algo_names = ['LinUCB', 'eps-greedy', 'LinTS']
# weak_attacks_list = [False] # [False, True] #
# methods_to_test = [None] # ['quadprog', 'heuristic']
# sparse_factors = [2.0]
# results = []
#
# sparse_attacks = None
# movielens = True
# jester = False
# dataset_model = movielens or jester
# assert(not(movielens and jester)), "cannot use both movielens and jester"
# if dataset_model:
# if movielens:
# simulator = DatasetModel(os.path.abspath('examples/movielens/Vt_movielens.csv'), user_csvfile=os.path.abspath("examples/movielens/U.csv"), arms_limit=100)
# elif jester:
# simulator = DatasetModel(os.path.abspath("examples/jester/Vt_jester.csv"), user_csvfile=os.path.abspath('examples/jester/U.csv'))
# else:
# print('Issue, should use a dataset that isn\'t jester or movielens')
# exit(0)
# # target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# # x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.context_lists, simulator.thetas.T).mean(axis=0)
# target_arm = np.argmin(means_x_star)
# else:
# simulator = None
# target_arm = None
#
# settings = {
# "T": T,
# "nb_simu": nb_simu,
# "random_state": random_state,
# "K": simulator.n_actions if simulator else K,
# "dimension": simulator.n_features if simulator else n_features,
# 'attack_frequencies': attack_frequencies,
# 'weak_attacks': weak_attacks_list,
# 'methods_to_test': methods_to_test,
# 'adversarial_xi': adversarial_xi,
# 'sparse_factors': sparse_factors,
# 'target_arm': target_arm,
# }
# weak_attack=False
# method=None
# if PARALLEL:
# import multiprocessing
# work_to_be_done = []
# # for attack_frequency in attack_frequencies:
# # sparse_factors_to_test = sparse_factors if attack_frequency != 0 else [False]
# # for sparse_attacks in sparse_factors_to_test:
# # method = None
# # weak_attack = False
# # for weak_attack in weak_attacks_list if attack_frequency else [True]:
# # methods_to_test_list = methods_to_test if not weak_attack and attack_frequency != 0 else [
# # 'quadprog']
# # for method in methods_to_test_list:
# # for xi in adversarial_xi:
#
# for alg_name in algo_names:
# for sim_index in range(nb_simu):
# work_to_be_done.append(
# (attack_frequency, sparse_attacks, weak_attack, method, adversarial_xi, sim_index, alg_name))
#
# for sim_index in range(nb_simu):
# # work_to_be_done.append((0.2, 10, False, 'quadprog', xi, sim_index))
# work_to_be_done.append((0.2, 10, False, 'quadprog', adversarial_xi[0], sim_index))
# settings['work_list'] = work_to_be_done
# num_cores = multiprocessing.cpu_count()
# results.append(Parallel(n_jobs=num_cores, verbose=1)(
# delayed(work)(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + sim_index,
# attack_frequency=attack_frequency,alg_name=alg_name,
# weak_attack=weak_attack, adversarial_xi=xi, method=method,
# sparse_attacks=sparse_attacks, simulator=simulator, target_arm=target_arm) for
# attack_frequency, sparse_attacks, weak_attack, method, xi, sim_index, alg_name in work_to_be_done))
# else:
# # for decrease_epsilon in [True, False]:
# for attack_frequency in [0]: # [1.0,0.1, 0]:
# weak_attack = False
# for k in tqdm(range(nb_simu)):
# ret = work(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + k,
# attack_frequency=attack_frequency,
# weak_attack=weak_attack)
# results.append(ret)
#
#
# id = '{}_{:%Y%m%d_%H%M%S}_{}'.format('jester' if jester else 'movilens' if movielens else 'simulation', datetime.datetime.now(), '_Contextual_linear')
# pickle_name = "{}_{}_linear_results.pickle".format(id, "PAR" if PARALLEL else "SEQ")
# print(pickle_name)
# with open(pickle_name, "wb") as f:
# pickle.dump(results, f)
# # with open("{}_{}_linear_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
# # json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/linear_contextual_bandit.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from isoexp.linear.linearbandit import EfficientLinearBandit, LinearBandit, LinPHE
from isoexp.conservative.linearmabs import EfficientConservativeLinearBandit, SafetySetCLUCB
from isoexp.linear.linearmab_models import RandomLinearArms, DiffLinearArms, OtherArms, CircleBaseline, LinPHEModel
from matplotlib import rc
from joblib import Parallel, delayed
from isoexp.linear.coldstart import ColdStartFromDatasetModel
import os
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
MABResults = namedtuple('MABResults', 'regret,norm_error, cum_rewards')
random_state = np.random.randint(0, 123123)
NOISE = 0.1
#model = RandomLinearArms(n_actions=300, n_features=100, noise=NOISE, bound_features = 5, bound_theta = 3)
model = ColdStartFromDatasetModel(csvfile=os.path.abspath('jester/Vt_jester.csv'), noise=NOISE)
theta_bound = np.linalg.norm(model.theta, 2)
means = np.dot(model.features, model.theta)
print(means)
idxs = np.argsort(means)
#baseline = np.random.randint(0, model.n_actions - 1)
baseline = idxs[-5]
mean_baseline = means[baseline]
optimal_arm = np.argmax(means)
PARALLEL = False
n_a = model.n_actions
d = model.n_features
T = 20000
batch_horizon = int(T*0.2)
nb_simu = 10
alpha = 0.1
algorithms = {
'EfficientLinearBandit': EfficientLinearBandit(arm_features=model.features,
reg_factor=1.,
delta=0.01,
noise_variance=NOISE,
bound_theta=theta_bound)
}
conservative_algorithms = {
# 'CLUCB-new': EfficientConservativeLinearBandit(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'new', oracle = False, means = means,
# batched = False, check_every = batch_horizon, positive = True),
# 'CLUCB-old': EfficientConservativeLinearBandit(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'old', oracle = False, means = means,
# batched = False, check_every = batch_horizon, positive = True),
# 'SafetySet-Old' : SafetySetCLUCB(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'old', batched = False, check_every = batch_horizon, positive = True, verbose = False),
# 'SafetySet-new' : SafetySetCLUCB(model.features, baseline, mean_baseline,
# bound_theta=theta_bound, noise_variance=NOISE,
# reg_factor=1., delta=0.01, conservative_level=alpha,
# version = 'new', oracle = False, batched = False, check_every = batch_horizon, means = means,
# verbose = False, positive = True)
}
algorithms = {**algorithms, **conservative_algorithms}
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
def work(alg_name, alg):
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
cond = regret.copy()
for k in tqdm(range(nb_simu)):
alg.reset()
for t in range(T):
a_t = alg.get_action()
# print(a_t)
r_t = model.reward(a_t)
alg.update(a_t, r_t)
cond[k, t] = means[a_t] - (1-alpha)*mean_baseline
regret[k, t] = model.best_arm_reward() - r_t
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
# results[alg_name] = \
return alg_name, MABResults(regret=regret, norm_error=norms, cum_rewards = cond)
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(alg_name, algorithms[alg_name]) for alg_name in algorithms.keys())
else:
from tqdm import trange
results = []
for alg_name in algorithms.keys():
regret = np.zeros((nb_simu, T))
norms = np.zeros((nb_simu, T))
cond = np.zeros((nb_simu, T))
nb = 0
draws = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
alg = algorithms[alg_name]
alg.reset()
for t in trange(T):
a_t = alg.get_action()
r_t = model.reward(a_t)
cond[k, t] = means[a_t] - (1-alpha)*mean_baseline
alg.update(a_t, r_t)
draws[k,t] = a_t
if a_t == baseline:
nb += 1
regret[k, t] = model.best_arm_reward() - r_t
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
results += [(alg_name, MABResults(regret=regret, norm_error=norms, cum_rewards=cond.cumsum(axis = 1)))]
#%%
plt.figure(1, figsize=(10, 10))
plt.figure(2, figsize=(10, 10))
for alg_name, val in results :
temp = val.regret
temp = temp.cumsum(axis = 1)
mean_regret = np.mean(temp, axis=0)
mean_norms = np.mean(val.norm_error, axis=0)
low_quantile = np.quantile(temp, 0.000, axis=0)
high_quantile = np.quantile(temp, 1, axis=0)
condition_satisfied = np.mean(val.cum_rewards, axis=0)
low_quantile_condition = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_condition = np.quantile(val.cum_rewards, 0.75, axis=0)
t = np.linspace(0, T-1, T, dtype='int')
# plt.subplot(131)
# # plt.plot(mean_norms, label=alg_name)
# plt.plot(mean_regret.cumsum() / (np.arange(len(mean_regret)) + 1), label=alg_name)
# plt.fill_between(t, low_quantile.cumsum() / (np.arange(len(mean_regret)) + 1),
# high_quantile.cumsum() / (np.arange(len(mean_regret)) + 1), alpha=0.15)
plt.figure(1)
print('mean_regret')
print(alg_name, ' = ', mean_regret[-1])
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.plot(mean_regret, label=alg_name)
plt.title('d = {}'.format(model.n_features))
plt.figure(2)
print(alg_name, '= ', min(condition_satisfied.cumsum()))
print('-'*100)
# plt.plot(condition_satisfied, label=alg_name)
plt.title('d = {}'.format(model.n_features))
# plt.fill_between(t, low_quantile_condition, high_quantile_condition, alpha = 0.15)
if alg_name != 'EfficientLinearBandit':
plt.plot(condition_satisfied.cumsum()[:200], label=alg_name)
plt.fill_between(t[:200], low_quantile_condition.cumsum()[:200], high_quantile_condition.cumsum()[:200], alpha = 0.15)
#ax = plt.subplot(131)
## plt.ylabel(r'$\|\hat{\theta} - \theta\|_{2}$')
#plt.ylabel(r'$R_t / t$')
#plt.xlabel("Rounds")
## # Turn off tick labels
## ax.set_yticklabels([])
## ax.set_xticklabels([])
#plt.legend()
#
#ax = plt.subplot(132)
#plt.ylabel("Cumulative Regret")
#plt.xlabel("Rounds")
## # Turn off tick labels
## ax.set_yticklabels([])
## ax.set_xticklabels([])
#plt.legend()
#
##ax = plt.subplot(223)
##plt.title('Model')
##plt.scatter(model.features[:, 0], model.features[:, 1])
##optimal_arm = np.argmax(means)
##plt.scatter([model.features[optimal_arm, 0]], [model.features[optimal_arm, 1]], color='red', label='Optimal arm')
##plt.scatter([model.features[baseline, 0]], [model.features[baseline, 1]], color='cyan', label='Baseline arm')
##plt.scatter([model.theta[0]], [model.theta[1]], color='yellow', label='Theta')
### # Turn off tick labels
### ax.set_yticklabels([])
### ax.set_xticklabels([])
##plt.legend()
#
#ax = plt.subplot(133)
#plt.ylabel("Margin")
#plt.xlabel("Rounds")
# # Turn off tick labels
# ax.set_yticklabels([])
# ax.set_xticklabels([])
plt.figure(1)
plt.legend()
#plt.savefig("model_random_{}_{}_seed_{}.png".format(alpha, model.n_actions, random_state))
plt.show()
| ContextualBanditsAttacks-main | examples/main_linearmab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import datetime
import json
import os
import pickle
import sys
from collections import namedtuple
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
from isoexp.contextual.contextual_models import DatasetModel, RandomContextualLinearArms
from examples.linear_contextual_bandit import work
from scipy.optimize import minimize, linprog
"""
TEST Linear Bandit
"""
def in_hull(points, x):
n_points = len(points)
n_dim = len(x)
c = np.zeros(n_points)
A = np.r_[points.T, np.ones((1, n_points))]
b = np.r_[x, np.ones(1)]
lp = linprog(c, A_eq=A, b_eq=b)
return lp.success
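# Hedged usage sketch (illustrative only, not used by the experiment below): in_hull
# solves a small feasibility LP and returns True iff x is a convex combination of the
# given points.
def _example_in_hull():
    square = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    inside = in_hull(square, np.array([0.5, 0.5]))   # True: centre of the unit square
    outside = in_hull(square, np.array([2.0, 2.0]))  # False: outside the square
    return inside, outside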
if __name__ == '__main__':
PARALLEL = True
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
local_random = np.random.RandomState(random_state)
print("seed: {}".format(random_state))
K = 10
n_features = 30
nb_arms=10
a_noise = 0.1
la = 1. / 2.
delta = 0.99
reg_factor = 0.1
just_a_test= True
if just_a_test:
T = 5 * 10 ** 4 # horizon
nb_simu = 5 # 48 * 5 #240
else:
T = int(1 * 10 ** 6) # horizon
nb_simu = 40 # 48 * 5 #240
adversarial_xi = 0.0001
noise=0.1
attack_frequencies = ['target_arm', 0.0] # [1.0, 'decrease_sqrt', 0]
algo_names = ['LinUCB', 'eps_greedy', 'LinTS']
weak_attacks_list = [False] # [False, True] #
methods_to_test = [None] # ['quadprog', 'heuristic']
sparse_factors = [None]
results = []
decrease_epsilon = True
movielens = True
jester = False
dataset_model = movielens or jester
assert(not(movielens and jester)), "cannot use both movielens and jester"
if dataset_model:
if movielens:
simulator = DatasetModel(os.path.abspath('examples/movielens/Vt_movielens.csv'), user_csvfile=os.path.abspath("examples/movielens/U.csv"), arms_limit=25, noise=noise, context_limit=100)
elif jester:
simulator = DatasetModel(os.path.abspath("examples/jester/Vt_jester.csv"), user_csvfile=os.path.abspath('examples/jester/U.csv'), noise=noise, context_limit=100)
else:
print('Issue, should use a dataset that isn\'t jester or movielens')
exit(0)
else:
simulator = RandomContextualLinearArms(n_actions=nb_arms, n_features=n_features, noise=noise, bound_context=1)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.thetas, x_star)
target_context = np.random.randint(low=0, high=len(simulator.context_lists))
x_star = simulator.context_lists[target_context]
means_x_star = np.dot(simulator.thetas, x_star)
target_arm = np.argmin(means_x_star)
method= 'linUCB_Relaxed'
settings = {
"T": T,
'models': algo_names,
"nb_simu": nb_simu,
"random_state": random_state,
"K": simulator.n_actions if simulator else K,
"dimension": simulator.n_features if simulator else n_features,
'attack_frequencies': attack_frequencies,
'weak_attacks': weak_attacks_list,
'methods_to_test': methods_to_test,
'adversarial_xi': adversarial_xi,
'sparse_factors': sparse_factors,
'target_arm': target_arm,
}
weak_attack=False
dataset_type = 'jester' if jester else 'movilens' if movielens else 'simulation'
print(f'running on {dataset_type}')
    # boolean mask selecting every arm except the target arm
    mask = np.ones(simulator.n_actions, dtype=bool)
    mask[target_arm] = False
    print(in_hull(x=simulator.thetas[target_arm], points=np.array(simulator.thetas[mask])))
    if in_hull(x=simulator.thetas[target_arm], points=np.array(simulator.thetas[mask])):
        raise ValueError('The target arm parameter is in the convex hull of the other arms\' parameters.')
if PARALLEL:
import multiprocessing
work_to_be_done = []
for alg_name in algo_names:
for attack_frequency in attack_frequencies:
for sparse_attacks in sparse_factors:
for sim_index in range(nb_simu):
work_to_be_done.append((attack_frequency, False, weak_attack, 'quadprog' if alg_name == 'eps_greedy' else 'linUCB_Relaxed' if alg_name == 'LinUCB' else 'TS_Relaxed' if alg_name=='LinTS' else None, adversarial_xi, sim_index, alg_name, x_star))
# for sim_index in range(nb_simu):
# # work_to_be_done.append((0.2, 10, False, 'quadprog', xi, sim_index))
# work_to_be_done.append((0.2, 10, False, 'quadprog', adversarial_xi[0], sim_index))
settings['work_list'] = work_to_be_done
num_cores = multiprocessing.cpu_count()
results.append(Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + sim_index,
attack_frequency=attack_frequency,alg_name=alg_name,
weak_attack=weak_attack, adversarial_xi=xi, method=method,
sparse_attacks=sparse_attacks, simulator=simulator, target_arm=target_arm, x_star=x_star) for
attack_frequency, sparse_attacks, weak_attack, method, xi, sim_index, alg_name, x_star in work_to_be_done))
else:
# for decrease_epsilon in [True, False]:
for attack_frequency in [0]: # [1.0,0.1, 0]:
weak_attack = False
for k in tqdm(range(nb_simu)):
ret = work(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + k,
attack_frequency=attack_frequency,
weak_attack=weak_attack)
results.append(ret)
id = '{}_{:%Y%m%d_%H%M%S}_{}_alg{}{}'.format(dataset_type, datetime.datetime.now(), 'linear_one_context', algo_names, '_test' if just_a_test else '')
pickle_name = "{}_{}_linear_results.pickle".format(id, "PAR" if PARALLEL else "SEQ")
print(pickle_name)
with open(pickle_name, "wb") as f:
pickle.dump(results, f)
with open("{}_{}_linear_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/experiment_one_context.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import isoexp.mab.arms as arms
import pickle
from isoexp.mab.smab_algs import UCB1, UCBV, BootstrapedUCB, PHE, Random_exploration
from isoexp.conservative.mab import CUCBV, SafetySetUCBV, powerful_oracle, CBUCB, CUCB, CPHE
from matplotlib import rc
import json
import datetime
rc('text', usetex=True)
import matplotlib.pyplot as plt
from tqdm import tqdm
from collections import namedtuple
MABResults = namedtuple('MABResults', 'regret, cum_rewards')
random_state = np.random.randint(0, 123123)
random_state = 117060  # fixed seed; overrides the random draw on the previous line
K = 10
MAB = []
means = np.random.uniform(low = 0.25, high = 0.75, size = K)
means = np.array([0.47823152, 0.70243227, 0.64504063, 0.65679234, 0.49546542,
0.46417188, 0.64736977, 0.71255566, 0.66844984, 0.26030838])
for k in range(K) :
MAB.append(arms.ArmBernoulli(p = means[k]))
nb_arms = len(MAB)
print('means: {}'.format(means))
mu_max = np.max(means)
T = 10000  # horizon
nb_simu = int(np.sqrt(T))
#Define baseline
pos = 3
baseline = np.argsort(means)[pos]
mean_baseline = MAB[baseline].mean
#Batch Version
conservative_level = 0.1
check_every = 2*np.sqrt(T)
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
"baseline": pos,
"conservative_levels": conservative_level,
}
algorithms = {
'UCB': lambda T, MAB: UCB1(T, MAB, alpha=1),
# 'UCBV': lambda T, MAB: UCBV(T, MAB),
# 'BootstrapUCB' : lambda T, MAB: BootstrapedUCB(T, MAB, delta = 0.1, b_rep = 200),
'PHE' : lambda T, MAB : PHE(T, MAB, alpha =2),
# 'RE' : lambda T, MAB : Random_exploration(T, MAB, alpha = 3, verbose = False)
}
conservative_algorithms = {
'CUCB' : lambda T, MAB : CUCB(T, MAB, baseline, mean_baseline, conservative_level= conservative_level, oracle = False, version = 'old', batched = False, check_every = check_every, alpha = 1),
'Oracle UCB' : lambda T, MAB : CUCB(T, MAB, baseline, mean_baseline, conservative_level= conservative_level, oracle = True, version = 'old', batched = False, check_every = check_every, alpha = 1),
# 'CUCB-new' : lambda T, MAB : CUCB(T, MAB, baseline, mean_baseline, conservative_level= conservative_level, oracle = False, version = 'new', batched = False, check_every = check_every, alpha = 1),
# 'CPHE-new' : lambda T, MAB : CPHE(T, MAB, baseline, mean_baseline, conservative_level = conservative_level, param_a1 = 2, version = 'new', batched = False, check_every = check_every),
'CPHE' : lambda T, MAB : CPHE(T, MAB, baseline, mean_baseline, conservative_level = conservative_level, param_a1 = 2, version = 'old', batched = False, check_every = None),
# 'CPHE-oracle' : lambda T, MAB : CPHE(T, MAB, baseline, mean_baseline, conservative_level = conservative_level, param_a1 = 2, oracle = True),
#'SafetySetUCBV-old' : lambda T, MAB : SafetySetUCBV(T, MAB, baseline, mean_baseline, alpha=1., conservative_level= conservative_level, version ='old'),
#'SafetySetUCBV-new' : lambda T, MAB : SafetySetUCBV(T, MAB, baseline, mean_baseline, alpha=1., conservative_level= conservative_level, version = 'new')
}
results = []
full_algo = {**algorithms, **conservative_algorithms}
for alg_name in full_algo.keys():
alg = full_algo[alg_name]
regret = np.zeros((nb_simu, T))
rwds = 0*regret
for k in tqdm(range(nb_simu), desc='Simulating {}'.format(alg_name)):
if alg_name in ['SafetySetUCBV-old', 'SafetySetUCBV-new'] :
rewards, draws, safe = alg(T, MAB)
else :
rewards, draws = alg(T, MAB)
regret[k] = max(means) * np.arange(1, T + 1) - np.cumsum(rewards)
rwds[k] = np.cumsum(means[draws.astype('int')])
results += [(alg_name, MABResults(regret=regret, cum_rewards= rwds))]
id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
with open("{}_{}_MAB_illustration.pickle".format(id, "SEQ"), "wb") as f:
pickle.dump(results, f)
with open("{}_{}_MAB_illustration_settings.json".format(id, "SEQ"), "w+") as f:
json.dump(settings, f)
#%%
#plt.figure(1,figsize=(10, 10))
plt.figure(2,figsize=(10, 10))
t = np.arange(1, T+1)
for alg_name, val in results:
mean_regret = np.mean(val.regret, axis=0)
low_quantile = np.quantile(val.regret, 0.25, axis=0)
high_quantile = np.quantile(val.regret, 0.75, axis=0)
rwds = np.mean(val.cum_rewards, axis = 0)
low_quantile_rwds = np.quantile(val.cum_rewards, 0.25, axis=0)
high_quantile_rwds = np.quantile(val.cum_rewards, 0.75, axis=0)
#
# plt.figure(1)
# plt.title('Margin')
# temp = rwds - (1- conservative_level)*t*mean_baseline
# plt.plot(temp[:200], label = alg_name)
# plt.legend()
# plt.fill_between(t, low_quantile_rwds - (1- conservative_level)*t*mean_baseline, high_quantile_rwds - (1- conservative_level)*t*mean_baseline, alpha = 0.15)
print(alg_name, '=', min(rwds - (1- conservative_level)*t*mean_baseline))
plt.figure(2)
plt.title('Regret')
plt.plot(mean_regret, label=alg_name)
plt.legend()
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.show()
#import tikzplotlib
#tikzplotlib.save("lcb_worst.tex")
| ContextualBanditsAttacks-main | examples/main_mab.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import datetime
import json
import os
import pickle
import sys
from collections import namedtuple
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
sys.path.append('/private/home/broz/workspaces/bandits_attacks')
from isoexp.contextual.contextual_models import DatasetModel, RandomContextualLinearArms
from examples.linear_contextual_bandit import work
"""
TEST Linear Bandit
"""
if __name__ == '__main__':
PARALLEL = True
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
local_random = np.random.RandomState(random_state)
print("seed: {}".format(random_state))
K = 10
n_features = 30
nb_arms=10
a_noise = 0.1
la = 1. / 2.
delta = 0.99
reg_factor = 0.1
just_a_test = False
if just_a_test:
T = 1 * 10 ** 4 # horizon
nb_simu = 5 # 48 * 5 #240
else:
T = 1 * 10 ** 6 # horizon
nb_simu = 20 # 48 * 5 #240
adversarial_xi = 0.0001
noise=0.1
attack_frequencies = [1.0, 0.2, 0.0] # [1.0, 'decrease_sqrt', 0]
algo_names = ['LinUCB', 'eps_greedy', 'LinTS']
weak_attacks_list = [False] # [False, True] #
methods_to_test = [None] # ['quadprog', 'heuristic']
sparse_factors = [2.0]
results = []
decrease_epsilon = True
seed = 1
movielens = False
jester = False
dataset_model = movielens or jester
assert(not(movielens and jester)), "cannot use both movielens and jester"
if dataset_model:
if movielens:
simulator = DatasetModel(os.path.abspath('examples/movielens/Vt_movielens.csv'), user_csvfile=os.path.abspath("examples/movielens/U.csv"), arms_limit=25, noise=noise)
elif jester:
simulator = DatasetModel(os.path.abspath("examples/jester/Vt_jester.csv"), user_csvfile=os.path.abspath('examples/jester/U.csv'), noise=noise)
else:
print('Issue, should use a dataset that isn\'t jester or movielens')
exit(0)
else:
simulator = RandomContextualLinearArms(n_actions=nb_arms, n_features=n_features, noise=noise, random_state=seed, bound_context=1)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
# means_x_star = np.dot(simulator.thetas, x_star)
# target_context = np.random.randint(low=0, high=len(simulator.context_lists))
# x_star = simulator.context_lists[target_context]
means_x_star = np.dot(simulator.context_lists, simulator.thetas.T).mean(axis=0)
target_arm = np.argmin(means_x_star)
settings = {
"T": T,
'models': algo_names,
"nb_simu": nb_simu,
"random_state": random_state,
"K": simulator.n_actions if simulator else K,
"dimension": simulator.n_features if simulator else n_features,
'attack_frequencies': attack_frequencies,
'weak_attacks': weak_attacks_list,
'methods_to_test': methods_to_test,
'adversarial_xi': adversarial_xi,
'sparse_factors': sparse_factors,
'target_arm': target_arm,
}
weak_attack=False
method=None
dataset_type = 'jester' if jester else 'movilens' if movielens else 'simulation'
print(f'running on {dataset_type}')
if PARALLEL:
import multiprocessing
work_to_be_done = []
for alg_name in algo_names:
for attack_frequency in attack_frequencies:
for sparse_attacks in sparse_factors:
for sim_index in range(nb_simu):
work_to_be_done.append((attack_frequency, sparse_attacks/ attack_frequency if attack_frequency > 0 else 0, weak_attack, method, adversarial_xi, sim_index, alg_name))
# for sim_index in range(nb_simu):
# # work_to_be_done.append((0.2, 10, False, 'quadprog', xi, sim_index))
# work_to_be_done.append((0.2, 10, False, 'quadprog', adversarial_xi[0], sim_index))
settings['work_list'] = work_to_be_done
num_cores = multiprocessing.cpu_count()
results.append(Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + sim_index,
attack_frequency=attack_frequency,alg_name=alg_name,
weak_attack=weak_attack, adversarial_xi=xi, method=method,
sparse_attacks=sparse_attacks, simulator=simulator, target_arm=target_arm) for
attack_frequency, sparse_attacks, weak_attack, method, xi, sim_index, alg_name in work_to_be_done))
else:
# for decrease_epsilon in [True, False]:
for attack_frequency in [0]: # [1.0,0.1, 0]:
weak_attack = False
for k in tqdm(range(nb_simu)):
ret = work(nb_arms=K, noise=a_noise, n_features=n_features, T=T, random_state=random_state + k,
attack_frequency=attack_frequency,
weak_attack=weak_attack)
results.append(ret)
id = '{}_{:%Y%m%d_%H%M%S}_{}_alg{}'.format(dataset_type, datetime.datetime.now(), '_Contextual_linear_all_contextes', algo_names)
pickle_name = "{}_{}_linear_results.pickle".format(id, "PAR" if PARALLEL else "SEQ")
print(pickle_name)
with open(pickle_name, "wb") as f:
pickle.dump(results, f)
with open("{}_{}_linear_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/experiment_all_contexts.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import shutil
import sys
from cycler import cycler
import tarfile
import json
ALPHA = 0.05
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
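# Hypothetical usage sketch (not part of the original script): pack a results folder into a
# gzipped tarball, e.g. tardir('20190902_135139_linear_PAR_linear_results',
# '20190902_135139_linear_PAR_linear_results.tar.gz'); the script itself calls
# tardir(folder, archive_name) at the end.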
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
# Default result files used when no command-line argument is given; the last assignment wins.
filename = '20190822_065452_Bernoulli_PAR_martingale_results.pickle'
filename = '20190901_124136_linear_PAR_linear_results.pickle'
filename = '20190902_135139_linear_PAR_linear_results.pickle'
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Opening file %s..." % filename)
setting_name = filename[:-14] + 'settings.json'
print('Opening settings %s...' % setting_name)
with open(setting_name, 'r') as f:
settings = json.load(f)
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
print("Done.\n")
with open(os.path.join(folder, setting_name), 'w') as f:
json.dump(settings, f)
EVERY = 200
LW = 2
LATEX = True
SAVE_MARGIN_FOR_EACH_MODEL=True
print("Generating all figures ...")
# select "bad" model
fpoint = open(os.path.join(folder, "scores.txt"), "w")
bad_model = None
min_val = np.inf
total_experiments = {}
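# total_experiments maps each algorithm name to the per-run cumulative-regret curves pooled
# across all models; it is averaged below to produce avg_regret.png/.tex.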
for m, model in enumerate(results):
cucb_M, cucb_H = 0, 0
plt.figure()
ymax = -np.inf
T = None
for alg_name, val in model[1]:
rep, T = val['cum_rewards'].shape
if alg_name not in total_experiments.keys():
total_experiments[alg_name] = []
t = np.arange(1, T + 1)
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
total_experiments[alg_name] += regret.tolist()
if alg_name in ["CLUCB-new-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
cucb_M = mean_regret[-1] + 2 * std[-1]
if alg_name in ["CLUCB-old-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
cucb_H = mean_regret[-1] - 2 * std[-1]
val = abs(cucb_M - cucb_H) / cucb_H
print(m, val)
fpoint.write("{} {}\n".format(m, val))
if val < min_val:
bad_model = m
min_val = val
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
plt.close()
fpoint.close()
print("Generating all figures for margin ...")
avg_area = {}
avg_margin = {}
for m, model in enumerate(results):
plt.figure()
ymax = -np.inf
ymin = np.inf
maxt = 0
T = None
print()
TOSAVE = {}
for alg_name, val in model[1]:
if alg_name not in avg_area.keys():
avg_area[alg_name] = []
avg_margin[alg_name] = []
rep, T = val['cum_rewards'].shape
margin = val['cum_rewards'].cumsum(axis=1)
t = np.arange(1, T + 1)
area = np.sum(margin * (margin < 0), axis=1).mean()
print('min_margin(', alg_name, ')=', margin.min())
print('area(', alg_name, ')=', area)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
if SAVE_MARGIN_FOR_EACH_MODEL:
M = np.concatenate((t.reshape(-1, 1), mean_margin.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
avg_area[alg_name] += [area]
avg_margin[alg_name] += margin.tolist()
idxs = mean_margin < 10
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
if SAVE_MARGIN_FOR_EACH_MODEL:
np.savez_compressed(os.path.join(folder, "model{}_margin".format(m)), **TOSAVE)
plt.xlim([1, maxt])
plt.ylim([ymin, ymax])
plt.legend()
plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}_margin.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}_margin.tex".format(m)))
plt.close()
ymax = -np.inf
TOSAVE = {}
for alg_name in total_experiments.keys():
regret = np.array(total_experiments[alg_name])
rep, T = regret.shape
t = np.arange(1, T + 1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
M = np.concatenate((t.reshape(-1, 1), mean_regret.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
np.savez_compressed(os.path.join(folder, "avg_regret"), **TOSAVE)
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
average_name = os.path.join(folder, "avg_regret.png")
print("Saving average performance to %s ..." % average_name)
plt.savefig(average_name)
average_name = os.path.join(folder, "avg_regret.tex")
tikzplotlib.save(average_name)
print("Done.\n")
avg_margin_name = os.path.join(folder, "avg_margin.png")
print("Saving average margin to %s..." % avg_margin_name)
plt.figure(figsize=(10, 10))
ymax = -np.inf
ymin = np.inf
maxt = -np.inf
TOSAVE = {}
for alg_name in avg_margin.keys():
margin = np.array(avg_margin[alg_name])
rep, T = margin.shape
t = np.arange(1, T + 1)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
idxs = mean_margin < 2
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
M = np.concatenate((t.reshape(-1, 1), mean_margin.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
plt.xlim([1, maxt])
# plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
plt.savefig(avg_margin_name)
average_name = os.path.join(folder, "avg_margin.tex")
tikzplotlib.save(average_name)
plt.close()
np.savez_compressed(os.path.join(folder, "avg_margin"), **TOSAVE)
print("Done.\n")
print(bad_model, min_val)
plt.figure(figsize=(10, 10))
plt.title("Model: {}".format(bad_model))
ymax = -np.inf
T = None
for model in [results[bad_model]]: # results:
print(model[2])
# for el in model[2]:
# print(el.mean)
for alg_name, val in model[1]:
print(alg_name)
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
regret = np.cumsum(val['regret'], axis=1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
low_quantile = np.quantile(regret, 0.25, axis=0)
high_quantile = np.quantile(regret, 0.75, axis=0)
# rwds = np.mean(val['cum_rewards'], axis=0)
# low_quantile_rwds = np.quantile(val['cum_rewards'], 0.25, axis=0)
# high_quantile_rwds = np.quantile(val['cum_rewards'], 0.75, axis=0)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.savefig(os.path.join(folder, "worst_linear_exp.png"))
print("Done.\n")
worst_name = os.path.join(folder, "worst_linear_exp.tex")
print("Saving worst model to %s..." % worst_name)
tikzplotlib.save(worst_name)
print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
| ContextualBanditsAttacks-main | examples/parse_linear_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
sys.path[0] = '/Users/evrard/Desktop/monotone_mabs/'  # NOTE: hardcoded path from the original author; adjust to your local checkout
import numpy as np
import isoexp.linear.linearmab_models as arms
import isoexp.linear.linearbandit as mabs
import isoexp.conservative.linearmabs as cmabs
from tqdm import tqdm, trange
from joblib import Parallel, delayed
import math
import dill
import json
import datetime
from collections import namedtuple
"""
TEST GLM Bandit
Compare between martingale and sum of Hoeffding bounds
"""
def work(m, nb_arms, nb_features, noise, b_pos, nb_simu, T, all_algs, random_state):
# create model
K = nb_arms
model = arms.RandomLogArms(n_actions = K,
n_features = nb_features,  # use the function argument rather than the module-level global
random_state = random_state + m,
bound_features = 1,
bound_theta = 1,
noise = noise)
means = model.link(np.dot(model.features,model.theta))
kappa = model.kappa
theta_bound = np.linalg.norm(model.theta, 2)
# Define baseline
baseline = np.argsort(means)[b_pos]
mean_baseline = means[baseline]
AAA = []
for alg_name in tqdm(all_algs.keys(), desc='Sim. model {}'.format(m)):
alg = all_algs[alg_name](model.features, noise, theta_bound,
mean_baseline, baseline, kappa = kappa)
regret = np.zeros((nb_simu, T))
rwds = regret.copy()
norms = regret.copy()
for k in trange(nb_simu, desc = 'Repetitions'):
alg.reset()
for t in trange(T, desc = 'Inside episode') :
a_t = alg.get_action()
r_t = model.reward(a_t)
if hasattr(alg, 'conservative_level'):
rwds[k,t] = means[a_t] - (1 - alg.conservative_level)*mean_baseline
else :
rwds[k,t] = means[a_t]
alg.update(a_t, r_t)
regret[k, t] = model.best_arm_reward() - means[a_t]
if hasattr(alg, 'theta_hat'):
norms[k, t] = np.linalg.norm(alg.theta_hat - model.theta, 2)
AAA += [(alg_name, {"regret": regret, "cum_rewards": rwds.cumsum(axis = 1), "norm_errors" : norms})]
return m, AAA, model, mean_baseline
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
PARALLEL = True
print("PARALLEL: {}".format(PARALLEL))
MABResults = namedtuple('MABResults', 'regret, cum_rewards, norm_errors')
random_state = np.random.randint(0, 123123)
np.random.seed(random_state)
print("seed: {}".format(random_state))
K = 20
n_features = 10
a_noise = 0.1
delta = 0.01
la = 1/4
T = 3000 # horizon
nb_models = 4
nb_simu = int(np.sqrt(T))
CLEVELS = [0.1]
BATCHES = [1]
pos = 15
settings = {
"T": T,
"nb_simu": nb_simu,
"random_state": random_state,
"K": K,
"dimension" : n_features,
"baseline": pos,
"conservative_levels": CLEVELS,
"batches": BATCHES
}
algorithms = {
'GLM-UCB': lambda feat, noise, b_theta, mean_b = 0, b = 0, alpha = 0, kappa = 1 : mabs.UCB_GLM(
reg_factor=la,
delta=delta,
arm_features = feat,
noise_variance = noise,
bound_theta = b_theta,
kappa = kappa,
model = 'bernoulli',
tighter_ucb = True)
}
conservative_algorithms = {}
for conservative_level in CLEVELS:
conservative_algorithms.update(
{
"GLM-CUCB-{}".format(conservative_level):
lambda feat, noise, b_theta, mean_b, b, alpha = conservative_level, kappa = 1:
cmabs.CUCB_GLM(arm_features = feat,
noise_variance = noise,
bound_theta = b_theta,
mean_baseline = mean_b,
baseline = b,
reg_factor = la,
delta = delta,
conservative_level = alpha,
kappa = kappa,
tighter_ucb = True,
model = 'bernoulli'), }
)
results = []
full_algo = {**algorithms, **conservative_algorithms}
if PARALLEL:
import multiprocessing
num_cores = multiprocessing.cpu_count()
results = Parallel(n_jobs=num_cores, verbose=1)(
delayed(work)(m=m, nb_arms=K, nb_features = n_features, noise = a_noise, b_pos=pos,
nb_simu=nb_simu, T=T, all_algs=full_algo,
random_state=random_state) for m in range(nb_models))
else:
for m in tqdm(range(nb_models)):
ret = work(m, K, n_features, a_noise, pos, nb_simu, T, full_algo, random_state)
results.append(ret)
# MAB = []
# means = None
# if ARMS == "Bernoulli":
# means = np.random.uniform(low=0.25, high=0.75, size=K)
# for n in range(K):
# MAB.append(arms.ArmBernoulli(p=means[n], random_state=random_state + n))
# elif ARMS == "TruncatedNormal":
# means = np.random.uniform(low=0., high=1., size=K)
# sigmas = np.random.uniform(low=0.1, high=1., size=K)
# for n in range(K):
# MAB.append(arms.ArmTruncNorm(original_mean=means[n], a=0, b=1, original_std=sigmas[n]))
# means[n] = MAB[n].mean
# sigmas[n] = MAB[n].sigma
# else:
# raise ValueError("Unknown arm type")
#
# AAA = []
# for alg_name in full_algo.keys():
# alg = full_algo[alg_name]
#
# regret = np.zeros((nb_simu, T))
# rwds = 0 * regret
#
# for k in range(nb_simu):
# # Define baseline
# baseline = np.argsort(means)[pos]
# mean_baseline = MAB[baseline].mean
#
# rewards, draws = alg(T, MAB, baseline, mean_baseline)
# regret[k] = max(means) * np.arange(1, T + 1) - np.cumsum(rewards)
# rwds[k] = np.cumsum(means[draws.astype('int')])
#
# AAA += [(alg_name, {"regret": regret, "cum_rewards": rwds})]
#
# results += [(m, AAA, MAB)]
results += [CLEVELS]
id = '{:%Y%m%d_%H%M%S}_{}'.format(datetime.datetime.now(), 'GLM')
with open("{}_{}_GLM_results.pickle".format(id, "PAR" if PARALLEL else "SEQ"), "wb") as f:
dill.dump(results, f)
with open("{}_{}_GLM_settings.json".format(id, "PAR" if PARALLEL else "SEQ"), "w+") as f:
json.dump(settings, f)
| ContextualBanditsAttacks-main | examples/runner_GLM.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import os
from cycler import cycler
import matplotlib.pyplot as plt
import tikzplotlib
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
# Quantity to plot; the last assignment wins.
what = 'regret'
what = 'margin'
EVERY = 200
LW = 2
folders = ["20190905_043752_Bernoulli_PAR_martingale_results"]
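# Result folders produced by the parse scripts; each one is expected to contain the
# aggregated arrays avg_regret.npz / avg_margin.npz loaded below.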
plt.figure(figsize=(20, 10))
T = 0
if what == 'margin':
ymax = np.inf
else:
ymax = -np.inf
for fname in folders:
M = np.load(os.path.join(fname, "avg_{}.npz".format(what)), mmap_mode='r')
for alg in M.files:
if not alg in ["CUCB-oracle-0.05", "CUCB-new-0.05-1",
"CUCB-LBS-new-0.05-1",
"CSUCB-old-0.05-1","CUCB-LBS-old-0.05-1"]:
data = M[alg]
t = data[:, 0]
yval = data[:, 1]
std = data[:, 2]
plt.plot(t[::EVERY], yval[::EVERY], linewidth=LW, label=alg)
plt.fill_between(t[::EVERY],
yval[::EVERY] - 2 * std[::EVERY], yval[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
if what == 'margin':
ymax = min(ymax, np.min(yval - 2 * std))
else:
ymax = max(ymax, yval[-1] + 2 * std[-1])
T = max(T, np.max(t))
plt.plot([0,T], [0, 0], '-', c='gray', linewidth=0.8)
plt.xlim([0, T])
# if ymax > 0:
# plt.ylim([0, ymax])
# else:
# plt.ylim([ymax, 5])
plt.xlabel("Time")
if what == "regret":
plt.ylabel("Cumulative Regret")
else:
plt.ylabel("Average Budget")
plt.legend()
plt.savefig("jester_average_{}.png".format(what))
tikzplotlib.save("jester_average_{}.tex".format(what))
plt.show()
| ContextualBanditsAttacks-main | examples/merge_real_data_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 22 15:37:36 2019
@author: evrard
"""
filename = '/Users/evrardgarcelon/Desktop/monotone_mabs/20190825_113802_GLM_PAR_GLM_results.pickle'
import dill
import numpy as np
import pylab as plt
with open(filename, 'rb') as f:
results = dill.load(f)
n_models = 1
n_algos = len(results[0])
nb_simu,T = results[0][1][0][1]['regret'].shape
clevels = results[-1]
baseline_means = np.zeros(n_models)
q = 0.25
t = np.linspace(0, T-1,T, dtype = 'int')
nb_first_iteration = 50
algorithms = {}
true_alg_name = {'GLM-UCB': 'UCB-GLM', 'GLM-CUCB-0.1': 'CUCB-GLM-0.1'}
for alg_name, res in results[0][1] :
algorithms[true_alg_name[alg_name]] = {'regret' : np.zeros((n_models,nb_simu, T)),
'cum_rewards' : np.zeros((n_models,nb_simu, T)),
'norm_errors' : np.zeros((n_models,nb_simu, T))}
for m in range(n_models) :
res = results[m][1]
baseline_means[m] = results[m][-1]
for i,val in enumerate(res) :
alg_name = val[0]
val = val[1]
algorithms[true_alg_name[alg_name]]['regret'][m,:,:] = val['regret']
algorithms[true_alg_name[alg_name]]['cum_rewards'][m,:,:] = val['cum_rewards']
algorithms[true_alg_name[alg_name]]['norm_errors'][m,:,:] = val['norm_errors']
plt.figure(1, figsize = (10,10))
plt.figure(2, figsize = (10,10))
regret = {}
margin = {}
for alg_name, res in algorithms.items() :
temp = res['regret'].cumsum(axis = 2)
mean_regret = np.mean(temp, axis = (0,1))
low_quantile = np.quantile(temp,q, axis = (0,1))
high_quantile = np.quantile(temp, 1-q, axis = (0,1))
regret[alg_name] = (mean_regret, low_quantile, high_quantile)
plt.figure(1)
plt.plot(mean_regret, label = alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
if alg_name != 'UCB-GLM' :
res['cum_rewards'] = res['cum_rewards'].cumsum(axis = 2)
mean_margin = np.mean(res['cum_rewards'], axis = (0,1))
low_quantile = np.quantile(res['cum_rewards'], q, axis = (0,1))
high_quantile = np.quantile(res['cum_rewards'], 1-q, axis = (0,1))
margin[alg_name] = (mean_margin, low_quantile, high_quantile)
else :
for alpha in clevels :
a_name = alg_name + '-{}'.format(alpha)
temp = 1*algorithms[alg_name]['cum_rewards']
for m in range(n_models) :
temp[m] = temp[m] - (1-alpha)*baseline_means[m]
temp = temp.cumsum(axis = 2)
mean_margin = np.mean(temp, axis = (0,1))
low_quantile = np.quantile(temp, q, axis = (0,1))
high_quantile = np.quantile(temp, 1-q, axis = (0,1))
margin[a_name] = (mean_margin[:nb_first_iteration], low_quantile[:nb_first_iteration], high_quantile[:nb_first_iteration])
plt.figure(2)
plt.plot(mean_margin[:nb_first_iteration], label = alg_name)
plt.fill_between(t[:nb_first_iteration], low_quantile[:nb_first_iteration], high_quantile[:nb_first_iteration], alpha = 0.15)
plt.figure(2)
plt.plot(t[:nb_first_iteration], np.zeros(nb_first_iteration), color = 'red', linestyle = '--', label = '0')
plt.figure(1)
plt.legend()
plt.figure(2)
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/reader_GLM.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.isotonicsim import LIsotron
import matplotlib.pyplot as plt
import numpy as np
from isoexp.LPAV_cvx import cvx_lip_isotonic_regression
N = 500
m = 5
X = np.random.rand(N*m).reshape(N, m)
w = np.random.rand(m)
orda = np.argsort(np.dot(X, w))
X = X[orda, :]
y = 2*np.dot(X, w)
y = np.dot(X, w)**3 # + np.random.randn(N)
x = np.dot(X, w)
#reg = LIsotron()
#yn = reg.fit_transform(X, y, lipschitz_value=1, iterations=50)
ones = np.zeros_like(y)
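# NOTE: despite its name, `ones` is a zero vector; it is passed as the `weights`
# argument of cvx_lip_isotonic_regression inside the loop below.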
iterations=400
wt = np.random.rand(X.shape[1])
wt = np.zeros(X.shape[1])
for t in range(iterations):
zt = np.dot(X, wt)
order = np.argsort(zt)
zt = zt[order]
print(zt)
y_iso = cvx_lip_isotonic_regression(x=zt, y=y[order], weights=ones, lipschitz_value=10)
print(y_iso)
print(y)
# plt.plot(xt, y[order], 'o')
# plt.plot(xt, y_iso, '+')
# plt.show()
wt = wt + np.mean((y[order] - y_iso)[:, np.newaxis] * X[order, :], axis=0)
print("true weights: {}".format(w))
print("weights: {}".format(wt))
plt.figure()
plt.plot(np.dot(X, w), y, '+', label="true")
#plt.plot(np.dot(X, w), np.dot(X, wt), 'o', label="learnt")
plt.plot(np.dot(X, w), y_iso, 'o', label="learnt2")
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/run_lisotron.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from isoexp.knnmab import KnnMab
from isoexp.isomab import IsoMab
import isoexp.monenvs as monenvs
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import time
np.random.seed(12345)
env = monenvs.Env1()
env.show()
plt.show()
# define algorithms
knn_a = KnnMab(env=env, Lc=100)
iso_a = IsoMab(env=env, Lc=100)
algs = [(knn_a, "knn"),(iso_a, "iso")]
# define params
rep = 2
T = 2500
## force compilation of the function
from isoexp.knnmab import knn_select_arm
start = time.time()
knn_select_arm(np.zeros((4,5)), np.zeros((4,)), -1, 1, 3, 1)
end = time.time()
print("Elapsed (with compilation) = %s" % (end - start))
# from isoexp.isomab import construct_datadep_ci
# start = time.time()
# construct_datadep_ci(np.zeros((6,)), np.zeros((6,)), np.zeros((1,)), np.zeros((1,)), 1, -1)
# end = time.time()
# print("Elapsed (with compilation) = %s" % (end - start))
# prepare figure
rf = plt.figure(99)
for alg in algs:
regrets = np.zeros((rep, T))
for i in tqdm(range(rep)):
alg[0].reset()
regret = alg[0].run(iterations=T)
cr = np.cumsum(regret)
regrets[i,:] = cr
plt.figure(99)
mu = regrets.mean(axis=0)
sigma = regrets.std(axis=0) / np.sqrt(rep)
p = plt.plot(mu, label=alg[1])
plt.fill_between(np.arange(len(mu)), mu + 2*sigma, mu - 2*sigma, facecolor=p[-1].get_color(), alpha=0.5)
plt.figure()
X = np.linspace(0, 1, 100)
arms = np.zeros_like(X)
for i in range(len(X)):
arms[i] = alg[0].select_arm(np.array([X[i]]))
plt.plot(X, arms, '+')
plt.title("Arm selection")
plt.xlabel("Covariate X")
plt.ylabel("Arm")
plt.title(alg[1])
plt.figure(99)
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
plt.show()
| ContextualBanditsAttacks-main | examples/main.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pickle
import tikzplotlib
import os
import sys
import shutil
from cycler import cycler
import tarfile
import json
ALPHA = 0.05
NAMES = {
'UCB': "UCB",
'CUCB-oracle-{}'.format(ALPHA): 'CUCB-Or',
'CUCB-old-{}-1'.format(ALPHA): 'CUCB',
'CUCB-new-{}-1'.format(ALPHA): 'CUCB-M',
'CSUCB-new-{}-1'.format(ALPHA): 'CUCB2',
'CSUCB-old-{}-1'.format(ALPHA): 'CUCB-C',
'CUCB-LBS-new-{}-1'.format(ALPHA): 'CUCB-ML',
'CUCB-LBS-old-{}-1'.format(ALPHA): 'CUCB-L',
}
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
n = 12 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-', '--', ':', '-.'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = '20190822_065452_Bernoulli_PAR_martingale_results.pickle'
print("Opening file %s..." % filename)
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Done.\n")
setting_name = filename[:-14] + 'settings.json'
print('Opening settings %s...' % setting_name)
with open(setting_name, 'r') as f:
settings = json.load(f)
baseline_pos = settings["baseline"]
conservative_levels = settings['conservative_levels'][-1]
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
with open(os.path.join(folder, setting_name), 'w') as f:
json.dump(settings, f)
EVERY = 100
LW = 2
LATEX = True
print("Generating all figures for regret ...")
# select "bad" model
fpoint = open(os.path.join(folder, "scores.txt"), "w")
bad_model = None
min_val = np.inf
total_experiments = {}
for m, model in enumerate(results):
cucb_M, cucb_H = 0, 0
plt.figure()
ymax = -np.inf
T = None
for alg_name, val in model[1]:
if alg_name not in total_experiments.keys():
total_experiments[alg_name] = []
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
mean_regret = np.mean(val['regret'], axis=0)
total_experiments[alg_name] += val['regret'].tolist()
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=NAMES[alg_name])
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
if alg_name in ["CUCB-new-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
cucb_M = mean_regret[-1] + 2 * std[-1]
if alg_name in ["CUCB-old-{}-1".format(ALPHA)]:
rep, T = val['cum_rewards'].shape
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
cucb_H = mean_regret[-1] - 2 * std[-1]
val = abs(cucb_M - cucb_H) / cucb_H
print(m, val)
fpoint.write("{} {}\n".format(m, val))
if val < min_val:
bad_model = m
min_val = val
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
# plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
plt.close()
fpoint.close()
print("Done.\n")
print("Generating all figures for margin ...")
avg_area = {}
avg_margin = {}
for m, model in enumerate(results):
plt.figure()
ymin = np.inf
ymax = -np.inf
maxt = 0
T = None
means = [el.mean for el in model[-1]]
action_b = np.argsort(means)[baseline_pos]
mean_baseline = means[action_b]
TOSAVE = {}
for alg_name, val in model[1]:
if alg_name not in avg_area.keys():
avg_area[alg_name] = []
avg_margin[alg_name] = []
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
cum_rewards = val['cum_rewards']
margin = cum_rewards - (1 - conservative_levels) * t * mean_baseline  # budget with respect to the (1 - alpha)-scaled baseline
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
M = np.concatenate((t.reshape(-1, 1), mean_margin.reshape(-1, 1), std.reshape(-1, 1)), axis=1)
TOSAVE[alg_name] = M
idxs = mean_margin < 2
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
area = np.sum(margin * (margin < 0), axis=1).mean()
print('min_margin(', alg_name, ')=', margin.min())
print('area(', alg_name, ')=', area)
print()
avg_area[alg_name] += [area]
avg_margin[alg_name] += margin.tolist()
np.savez_compressed(os.path.join(folder, "model{}_margin".format(m)), **TOSAVE)
plt.xlim([1, maxt])
plt.ylim([ymin, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
# plt.title("model: {}".format(m))
plt.savefig(os.path.join(folder, "model{}_margin.png".format(m)))
if LATEX:
tikzplotlib.save(os.path.join(folder, "model{}_margin.tex".format(m)))
plt.close()
print("Done.\n")
ymax = -np.inf
for alg_name in total_experiments.keys():
regret = np.array(total_experiments[alg_name])
rep, T = regret.shape
t = np.arange(1, T + 1)
mean_regret = np.mean(regret, axis=0)
std = np.std(regret, axis=0) / np.sqrt(rep)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=NAMES[alg_name])
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Cumulative Regret")
plt.legend()
average_name = os.path.join(folder, "avg_regret.png")
print("Saving average performance to %s ..." % average_name)
plt.savefig(average_name)
average_name = os.path.join(folder, "avg_regret.tex")
tikzplotlib.save(average_name)
print("Done.\n")
for alg_name in avg_area.keys():
print("AverageAREA({}): {}".format(alg_name, np.mean(avg_area[alg_name])))
avg_margin_name = os.path.join(folder, "avg_margin.png")
print("Saving average margin to %s..." % avg_margin_name)
plt.figure(figsize=(10, 10))
ymax = -np.inf
ymin = np.inf
maxt = -np.inf
TOSAVE = {}
for alg_name in avg_margin.keys():
margin = np.array(avg_margin[alg_name])
rep, T = margin.shape
t = np.arange(1, T + 1)
mean_margin = np.mean(margin, axis=0)
std = np.std(margin, axis=0) / np.sqrt(rep)
idxs = mean_margin < 2
if np.sum(idxs) > 0:
plt.plot(t[idxs], mean_margin[idxs], linewidth=LW, label=alg_name)
plt.fill_between(t[idxs],
mean_margin[idxs] - 2 * std[idxs], mean_margin[idxs] + 2 * std[idxs],
alpha=0.15)
ymin = min(ymin, np.min(mean_margin[idxs] - 2 * std[idxs]))
ymax = max(ymax, np.max(mean_margin[idxs] + 2 * std[idxs]))
maxt = max(maxt, np.max(t[idxs]))
M = np.concatenate((t.reshape(-1,1), mean_margin.reshape(-1,1), std.reshape(-1,1)), axis=1)
TOSAVE[alg_name] = M
plt.xlim([1, maxt])
# plt.ylim([0, ymax])
plt.xlabel("Time")
plt.ylabel("Average Margin")
plt.legend()
plt.savefig(avg_margin_name)
average_name = os.path.join(folder, "avg_margin.tex")
tikzplotlib.save(average_name)
plt.close()
np.savez_compressed(os.path.join(folder, "avg_margin"), **TOSAVE)
print("Done.\n")
print(bad_model, min_val)
plt.figure(figsize=(10, 10))
plt.title("Model: {}".format(bad_model))
ymax = -np.inf
T = None
for model in [results[bad_model]]: # results:
print(model[2])
# for el in model[2]:
# print(el.mean)
for alg_name, val in model[1]:
print(alg_name)
rep, T = val['cum_rewards'].shape
t = np.arange(1, T + 1)
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
low_quantile = np.quantile(val['regret'], 0.25, axis=0)
high_quantile = np.quantile(val['regret'], 0.75, axis=0)
rwds = np.mean(val['cum_rewards'], axis=0)
low_quantile_rwds = np.quantile(val['cum_rewards'], 0.25, axis=0)
high_quantile_rwds = np.quantile(val['cum_rewards'], 0.75, axis=0)
plt.plot(t[::EVERY], mean_regret[::EVERY], linewidth=LW, label=alg_name)
plt.fill_between(t[::EVERY], mean_regret[::EVERY] - 2 * std[::EVERY], mean_regret[::EVERY] + 2 * std[::EVERY],
alpha=0.15)
ymax = max(ymax, mean_regret[-1] + 2 * std[-1])
plt.xlim([0, T])
plt.ylim([0, ymax])
plt.legend()
plt.savefig(os.path.join(folder, "worst_mab_exp.png"))
print("Done.\n")
worst_name = os.path.join(folder, "worst_mab_exp.tex")
print("Saving worst model to %s ..." % worst_name)
tikzplotlib.save(worst_name)
print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s ..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
| ContextualBanditsAttacks-main | examples/parse_mab_results.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Created on Thu Aug 22 15:37:36 2019
@author: evrard
"""
import pickle
import numpy as np
import matplotlib.pyplot as plt
import tikzplotlib
import os
import sys
import shutil
from cycler import cycler
import tarfile
def tardir(path, tar_name):
with tarfile.open(tar_name, "w:gz") as tar_handle:
for root, dirs, files in os.walk(path):
for file in files:
tar_handle.add(os.path.join(root, file))
def plot_model(model, name):
ymax = -np.inf
batches = []
plt.figure()
plt.title('model: {}'.format(name))
area = 0.
for p, AAA in model[1]:
batches = []
UCB_vals = None
UCB_stds = None
CUCB1_vals = None
CUCB1_stds = None
CUCBB_vals = []
CUCBB_stds = []
for alg_name, mean_regret, std in AAA:
print(alg_name)
if alg_name == "UCB":
UCB_vals = mean_regret[-1]
UCB_stds = std[-1]
elif alg_name == "CUCB-new-0.1-1":
CUCB1_vals = mean_regret[-1]
CUCB1_stds = std[-1]
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
else:
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
# area += CUCBB_vals - UCB_vals
CUCBB_vals = np.array(CUCBB_vals)
CUCBB_stds = np.array(CUCBB_stds)
if CUCB1_vals is not None:
ax1 = plt.plot([batches[0], batches[-1]], [CUCB1_vals, CUCB1_vals], label='CUCB_p{}'.format(p),
marker='o')
ax1_col = ax1[0].get_color()
plt.fill_between([batches[0], batches[-1]], CUCB1_vals - 2 * CUCB1_stds, CUCB1_vals + 2 * CUCB1_stds,
alpha=0.15, color=ax1_col)
ymax = max(ymax, CUCB1_vals + 2 * CUCB1_stds)
if UCB_vals is not None:
ax1 = plt.plot([batches[0], batches[len(batches) - 1]], [UCB_vals, UCB_vals],
label='UCB_p{}'.format(p), marker='+')
ax1_col = ax1[0].get_color()
plt.fill_between(batches, UCB_vals - 2 * UCB_stds, UCB_vals + 2 * UCB_stds, alpha=0.15, color=ax1_col)
ymax = max(ymax, UCB_vals + 2 * UCB_stds)
if len(CUCBB_vals) > 0:
ax1 = plt.plot(batches, CUCBB_vals, label='CUCB_BATCH_p{}'.format(p), marker='d')
ax1_col = ax1[0].get_color()
plt.fill_between(batches, CUCBB_vals - 2 * CUCBB_stds, CUCBB_vals + 2 * CUCBB_stds,
alpha=0.15, color=ax1_col)
ymax = max(ymax, np.max(CUCBB_vals + 2 * CUCBB_stds))
N = int(np.max(batches))  # np.asscalar is deprecated in recent NumPy versions
plt.xlim([0, N])
plt.ylim([0, ymax])
plt.xlabel("Checkpoint")
plt.ylabel("Regret")
plt.legend()
return area
def plot_all2(results):
processed_results = {}
T = None
for m, model in enumerate(results):
batches = []
for p, AAA in model[1]:
if p not in processed_results.keys():
processed_results[p] = []
batches = []
UCB_vals = None
UCB_stds = None
CUCB1_vals = None
CUCB1_stds = None
CUCBB_vals = []
CUCBB_stds = []
for alg_name, mean_regret, std in AAA:
print(alg_name)
if alg_name == "UCB":
T = len(mean_regret)
UCB_vals = mean_regret[-1]
UCB_stds = std[-1]
elif alg_name == "CUCB-new-0.1-1":
CUCB1_vals = mean_regret[-1]
CUCB1_stds = std[-1]
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
else:
CUCBB_vals.append(mean_regret[-1])
CUCBB_stds.append(std[-1])
batches.append(int(alg_name.split('-')[-1]))
# area += CUCBB_vals - UCB_vals
CUCBB_vals = np.array(CUCBB_vals)
CUCBB_stds = np.array(CUCBB_stds)
processed_results[p].append((CUCBB_vals - UCB_vals).tolist())
for p in processed_results.keys():
vals = np.array(processed_results[p])
mean = np.mean(vals, axis=0)
std = np.std(vals, axis=0) / np.sqrt(vals.shape[0])
ax1 = plt.plot(batches, mean, label="p={}".format(p))
ax1_col = ax1[0].get_color()
plt.fill_between(batches, mean - 2 * std, mean + 2 * std, alpha=0.15, color=ax1_col)
plt.legend()
plt.xlabel("Checkpoint ($T$)")
plt.ylabel("R_(CUCB2)({}) - R_(UCB)({})".format(T, T))
n = 9 # Number of colors
new_colors = [plt.get_cmap('Set1')(1. * i / n) for i in range(n)]
linestyle_cycler = cycler('linestyle', ['-', '--', ':', '-.', '-', '--', ':', '-.', '-'])
plt.rc('axes', prop_cycle=(cycler('color', new_colors) + linestyle_cycler))
plt.rc('lines', linewidth=2)
if len(sys.argv) == 1:
filename = 'COMP_20190825_033627_batch_results.pickle'
else:
filename = sys.argv[1]
SUMMARIZE = False
print("Opening file %s..." % filename)
with open(filename, 'rb') as f:
results = pickle.load(f)
print("Done.\n")
folder = filename.split('.')[0]
if os.path.exists(folder):
shutil.rmtree(folder)
os.makedirs(folder)
EVERY = 10
LW = 2
if SUMMARIZE:
new_results = []
for m, model in enumerate(results):
BBB = []
for pos, algorithms in model[1]:
AAA = []
for alg_name, val in algorithms:
# print(m, pos, alg_name)
rep, T = val['cum_rewards'].shape
mean_regret = np.mean(val['regret'], axis=0)
std = np.std(val['regret'], axis=0) / np.sqrt(rep)
AAA.append((alg_name, mean_regret, std))
BBB.append((pos, AAA))
new_results.append((m, BBB, model[2]))
with open("COMP_{}".format(filename), "wb") as f:
pickle.dump(new_results, f)
else:
bad_model = None
max_area = -np.inf
print("Generating all figures ...")
plot_all2(results)
# for m, model in enumerate(results):
#
# area = plot_model(model, name=m)
# plt.savefig(os.path.join(folder, "model{}.png".format(m)))
# tikzplotlib.save(os.path.join(folder, "model{}.tex".format(m)))
# plt.close()
#
# if area > max_area:
# bad_model = m
# max_area = area
# print(max_area)
#
# plot_model(results[bad_model], name=bad_model)
# plt.savefig(os.path.join(folder, "worst_mab_exp.png"))
# plt.close()
# print("Done.\n")
worst_name = os.path.join(folder, "mab_batch.tex")
print("Saving worst model to %s ..." % worst_name)
tikzplotlib.save(worst_name)
print("Done.\n")
archive_name = "{}.tar.gz".format(folder)
print("Compressing files to %s ..." % archive_name)
tardir(folder, archive_name)
print("Done.\n")
plt.show()
# n_models = len(results)
# n_batch = len(results[0][1]) - 1
# nb_simu, T = results[0][1][0][1]['regret'].shape
# batches = []
# q = 0.25
# regret_batch_at_T = np.zeros((n_models, n_batch, nb_simu))
# regret_UCB_T = np.zeros((n_models, 1, nb_simu))
# for m in range(n_models):
# res = results[m][1]
# for i, val in enumerate(res):
# alg_name = val[0]
# val = val[1]
# if alg_name == 'UCB':
# regret_UCB_T[m] = val['regret'][:, -1]
# else:
# alg_name[13::]
# batches.append(int(alg_name[13::]))
# regret_batch_at_T[m, i - 1, :] = val['regret'][:, -1]
#
# batches = np.array(batches)
# batches = batches / T
# regret_diff = regret_batch_at_T - regret_UCB_T
# mean_regret_diff = np.mean(regret_diff, axis=(0, 2))
# high_quantile = np.quantile(regret_diff, 1 - q, axis=(0, 2))
# low_quantile = np.quantile(regret_diff, q, axis=(0, 2))
# plt.plot(batches, mean_regret_diff, color='green')
# plt.fill_between(batches, low_quantile, high_quantile, alpha=0.15, color='green')
# plt.show()
| ContextualBanditsAttacks-main | examples/parse_batch_results.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 22 15:37:36 2019
@author: evrard
"""
filename = '20190829_104727_linear_PAR_linear_results.pickle'
import pickle
import numpy as np
import pylab as plt
with open(filename, 'rb') as f:
results = pickle.load(f)
n_models = len(results)
n_algos = len(results[0])
nb_simu,T = results[0][1][0][1]['regret'].shape
q = 0.25
t = np.linspace(1, T,T)
algorithms = {}
for alg_name, res in results[0][1] :
algorithms[alg_name] = {'regret' : np.zeros((n_models,nb_simu, T)),
'cum_rewards' : np.zeros((n_models,nb_simu, T)),
'norm_errors' : np.zeros((n_models,nb_simu, T))}
for m in range(n_models) :
res = results[m][1]
for i,val in enumerate(res) :
alg_name = val[0]
val = val[1]
algorithms[alg_name]['regret'][m,:,:] = val['regret']
algorithms[alg_name]['cum_rewards'][m,:,:] = val['cum_rewards']
algorithms[alg_name]['norm_errors'][m,:,:] = val['norm_errors']
plt.figure(figsize = (10,10))
for alg_name, res in algorithms.items() :
res['regret'] = res['regret'].cumsum(axis = 2)
mean_regret = np.mean(res['regret'], axis = (0,1))
low_quantile = np.quantile(res['regret'], q, axis = (0,1))
high_quantile = np.quantile(res['regret'], 1-q, axis = (0,1))
plt.plot(mean_regret, label = alg_name)
plt.fill_between(t, low_quantile, high_quantile, alpha = 0.15)
plt.legend()
| ContextualBanditsAttacks-main | examples/reader_linear.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
np.random.seed(1234)
# we want 500 images for training and 100 for test for each class
n = 500
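# get_total flattens a {label: [images]} dictionary into a single dict with parallel
# 'images' and 'labels' lists, ready to be pickled as a split.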
def get_total(data):
data_x, data_y = [], []
for k, v in data.items():
for i in range(len(v)):
data_x.append(v[i])
data_y.append(k)
d = {}
d['images'] = data_x
d['labels'] = data_y
return d
# loading the pickled data
with open(os.path.join('../data/miniimagenet/data.pkl'), 'rb') as f:
data_dict = pickle.load(f)
data = data_dict['images']
labels = data_dict['labels']
# split data into classes, 600 images per class
class_dict = {}
for i in range(len(set(labels))):
class_dict[i] = []
for i in range(len(data)):
class_dict[labels[i]].append(data[i])
# Split data for each class to 500 and 100
x_train, x_test = {}, {}
for i in range(len(set(labels))):
np.random.shuffle(class_dict[i])
x_test[i] = class_dict[i][n:]
x_train[i] = class_dict[i][:n]
# mix the data
d_train = get_total(x_train)
d_test = get_total(x_test)
with open(os.path.join('../data/miniimagenet/train.pkl'), 'wb') as f:
pickle.dump(d_train, f)
with open(os.path.join('../data/miniimagenet/test.pkl'), 'wb') as f:
pickle.dump(d_test, f) | Adversarial-Continual-Learning-main | ACL-resnet/data/split_miniimagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, time, os
import numpy as np
import torch
import copy
import utils
from copy import deepcopy
from tqdm import tqdm
sys.path.append('../')
from networks.discriminator import Discriminator
class ACL(object):
def __init__(self, model, args, network):
self.args=args
self.nepochs=args.nepochs
self.sbatch=args.batch_size
# optimizer & adaptive lr
self.e_lr=args.e_lr
self.d_lr=args.d_lr
if not args.experiment == 'multidatasets':
self.e_lr=[args.e_lr] * args.ntasks
self.d_lr=[args.d_lr] * args.ntasks
else:
self.e_lr = [self.args.lrs[i][1] for i in range(len(args.lrs))]
self.d_lr = [self.args.lrs[i][1]/10. for i in range(len(args.lrs))]
print ("d_lrs : ", self.d_lr)
self.lr_min=args.lr_min
self.lr_factor=args.lr_factor
self.lr_patience=args.lr_patience
self.samples=args.samples
self.device=args.device
self.checkpoint=args.checkpoint
self.adv_loss_reg=args.adv
self.diff_loss_reg=args.orth
self.s_steps=args.s_step
self.d_steps=args.d_step
self.diff=args.diff
self.network=network
self.inputsize=args.inputsize
self.taskcla=args.taskcla
self.num_tasks=args.ntasks
# Initialize generator and discriminator
self.model=model
self.discriminator=self.get_discriminator(0)
self.discriminator.get_size()
self.latent_dim=args.latent_dim
self.task_loss=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_d=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_s=torch.nn.CrossEntropyLoss().to(self.device)
self.diff_loss=DiffLoss().to(self.device)
self.optimizer_S=self.get_S_optimizer(0)
self.optimizer_D=self.get_D_optimizer(0)
self.use_memory = True if self.args.use_memory == 'yes' else False
self.task_encoded={}
self.mu=0.0
self.sigma=1.0
print()
def get_discriminator(self, task_id):
discriminator=Discriminator(self.args, task_id).to(self.args.device)
return discriminator
def get_S_optimizer(self, task_id, e_lr=None):
if e_lr is None: e_lr=self.e_lr[task_id]
optimizer_S=torch.optim.SGD(self.model.parameters(), momentum=self.args.mom,
weight_decay=self.args.e_wd, lr=e_lr)
return optimizer_S
def get_D_optimizer(self, task_id, d_lr=None):
if d_lr is None: d_lr=self.d_lr[task_id]
optimizer_D=torch.optim.SGD(self.discriminator.parameters(), weight_decay=self.args.d_wd, lr=d_lr)
return optimizer_D
def train(self, task_id, dataset):
if task_id > 0:
self.model = self.prepare_model(task_id)
self.discriminator=self.get_discriminator(task_id)
best_loss=np.inf
best_model=utils.get_model(self.model)
best_loss_d=np.inf
best_model_d=utils.get_model(self.discriminator)
dis_lr_update=True
d_lr=self.d_lr[task_id]
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
e_lr=self.e_lr[task_id]
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
for e in range(self.nepochs):
# Train
clock0=time.time()
self.train_epoch(dataset['train'], task_id)
clock1=time.time()
train_res=self.eval_(dataset['train'], task_id)
utils.report_tr(train_res, e, self.sbatch, clock0, clock1)
# Lower the learning rates early on if accuracy is still around random chance after the first 5 epochs
if (self.args.experiment == 'cifar100' or self.args.experiment == 'miniimagenet') and e == 4:
random_chance=20.
threshold=random_chance + 2
if train_res['acc_t'] < threshold:
# Restore best validation model
d_lr=self.d_lr[task_id] / 10.
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print("Performance on task {} is {} so Dis's lr is decreased to {}".format(task_id, train_res[
'acc_t'], d_lr), end=" ")
e_lr=self.e_lr[task_id] / 10.
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
self.discriminator=self.get_discriminator(task_id)
if task_id > 0:
self.model=self.load_checkpoint(task_id - 1)
else:
self.model=self.network.Net(self.args).to(self.args.device)
# Valid
valid_res=self.eval_(dataset['valid'], task_id)
utils.report_val(valid_res)
# Adapt lr for S and D
if valid_res['loss_tot'] < best_loss:
best_loss=valid_res['loss_tot']
best_model=utils.get_model(self.model)
patience=self.lr_patience
print(' *', end='')
else:
patience-=1
if patience <= 0:
e_lr/=self.lr_factor
print(' lr={:.1e}'.format(e_lr), end='')
if e_lr < self.lr_min:
print()
break
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
if train_res['loss_a'] < best_loss_d:
best_loss_d=train_res['loss_a']
best_model_d=utils.get_model(self.discriminator)
patience_d=self.lr_patience
else:
patience_d-=1
if patience_d <= 0 and dis_lr_update:
d_lr/=self.lr_factor
print(' Dis lr={:.1e}'.format(d_lr))
if d_lr < self.lr_min:
dis_lr_update=False
print("Dis lr reached minimum value")
print()
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print()
# Restore best validation model (early-stopping)
self.model.load_state_dict(copy.deepcopy(best_model))
self.discriminator.load_state_dict(copy.deepcopy(best_model_d))
self.save_all_models(task_id)
def train_epoch(self, train_loader, task_id):
self.model.train()
self.discriminator.train()
for data, target, tt, td in train_loader:
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
if self.use_memory:
# Detach samples in the batch that do not belong to the current task before feeding them to the private module
t_current = task_id * torch.ones_like(tt)
body_mask = torch.eq(t_current, tt).cpu().numpy()
# x_task_module=data.to(device=self.device)
x_task_module = data.clone()
for index in range(x.size(0)):
if body_mask[index] == 0:
x_task_module[index] = x_task_module[index].detach()
x_task_module = x_task_module.to(device=self.device)
# Discriminator's real and fake task labels
t_real_D=td.to(self.device)
t_fake_D=torch.zeros_like(t_real_D).to(self.device)
# ================================================================== #
# Train Shared Module #
# ================================================================== #
# training S for s_steps
for s_step in range(self.s_steps):
self.optimizer_S.zero_grad()
self.model.zero_grad()
if self.use_memory:
output=self.model(x, x_task_module, tt)
else:
output = self.model(x, x)
# task_loss=self.task_loss(output, y)
task_loss=self.task_loss(output['out'], y)
shared_out, private_out = output['shared'], output['private']
dis_out_gen_training=self.discriminator.forward(shared_out)
adv_loss=self.adversarial_loss_s(dis_out_gen_training, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
total_loss.backward(retain_graph=True)
self.optimizer_S.step()
# ================================================================== #
# Train Discriminator #
# ================================================================== #
# training discriminator for d_steps
for d_step in range(self.d_steps):
self.optimizer_D.zero_grad()
self.discriminator.zero_grad()
# training discriminator on real data
if self.use_memory:
output=self.model(x, x_task_module, tt)
else:
output = self.model(x, x)
# training discriminator on real data
shared_out, private_out = output['shared'], output['private']
dis_real_out=self.discriminator.forward(shared_out.detach())
dis_real_loss=self.adversarial_loss_d(dis_real_out, t_real_D)
if self.args.experiment == 'miniimagenet':
dis_real_loss*=self.args.adv
dis_real_loss.backward(retain_graph=True)
# training discriminator on fake data
z_fake=torch.as_tensor(np.random.normal(self.mu, self.sigma, (x.size(0), self.latent_dim)),
dtype=torch.float32, device=self.device)
dis_fake_out=self.discriminator.forward(z_fake)
dis_fake_loss=self.adversarial_loss_d(dis_fake_out, t_fake_D)
if self.args.experiment == 'miniimagenet':
dis_fake_loss*=self.args.adv
dis_fake_loss.backward(retain_graph=True)
self.optimizer_D.step()
return
def eval_(self, data_loader, task_id):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t = 0, 0
num=0
batch=0
self.model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output = self.model(x, x)
shared_out, private_out = output['shared'], output['private']
_, pred=output['out'].max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator(shared_out)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
# Loss values
task_loss=self.task_loss(output['out'], y)
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss = task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
#
def test(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t=0, 0
num=0
batch=0
model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
t_real_D=td.to(self.device)
# Forward
output = model(x, x)
# shared_out, private_out = self.model.get_encoded_ftrs(x, x)
shared_out, private_out = output['shared'], output['private']
_, pred=output['out'].max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator(shared_out)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
# Loss values
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
task_loss=self.task_loss(output['out'], y)
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
def save_all_models(self, task_id):
print("Saving all models for task {} ...".format(task_id+1))
dis=utils.get_model(self.discriminator)
torch.save({'model_state_dict': dis,
}, os.path.join(self.checkpoint, 'discriminator_{}.pth.tar'.format(task_id)))
model=utils.get_model(self.model)
torch.save({'model_state_dict': model,
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
def load_model(self, task_id):
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# Replace the loaded model's shared module with the current shared module
current_shared_module=deepcopy(self.model.shared.state_dict())
net.shared.load_state_dict(current_shared_module)
net=net.to(self.args.device)
return net
def load_checkpoint(self, task_id):
print("Loading checkpoint for task {} ...".format(task_id))
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
net=net.to(self.args.device)
return net
def prepare_model(self, task_id):
# Load a previous model and grab its shared module
old_net = self.load_checkpoint(task_id-1)
old_shared_module = old_net.shared.state_dict()
# Instantiate a new model and replace its shared module
model = self.network.Net(self.args)
model.shared.load_state_dict(old_shared_module)
model = model.to(self.device)
return model
def loader_size(self, data_loader):
return data_loader.dataset.__len__()
def get_tsne_embeddings_first_ten_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
model.eval()
tag_ = '_diff_{}'.format(self.args.diff)
all_images, all_shared, all_private = [], [], []
# Test final model on first 10 tasks:
writer = SummaryWriter()
for t in range(10):
for itr, (data, _, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
print (torch.stack(all_shared).size())
tag = ['Shared10_{}_{}'.format(tag_,i) for i in range(1,11)]
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#, metadata_header=list(range(1,11)))
tag = ['Private10_{}_{}'.format(tag_, i) for i in range(1, 11)]
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#,metadata_header=list(range(1,11)))
writer.close()
def get_tsne_embeddings_last_three_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
# Test final model on last 3 tasks:
model.eval()
tag = '_diff_{}'.format(self.args.diff)
for t in [17,18,19]:
all_images, all_labels, all_shared, all_private = [], [], [], []
writer = SummaryWriter()
for itr, (data, target, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
# print (shared_out.size())
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
all_labels.append(y)
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Shared_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Private_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.close()
def inference(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total = 0, 0, 0, 0
correct_d, correct_t = 0, 0
num = 0
batch = 0
model.eval()
self.discriminator.eval()
res = {}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
# Forward
output = model.forward(x, x, tt, task_id)
shared_out, task_out = model.get_encoded_ftrs(x, x, task_id)
_, pred = output.max(1)
correct_t += pred.eq(y.view_as(pred)).sum().item()
if self.diff == 'yes':
diff_loss = self.diff_loss(shared_out, task_out)
else:
diff_loss = torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg = 0
# Loss values
task_loss = self.task_loss(output, y)
total_loss = task_loss + self.diff_loss_reg * diff_loss
loss_t += task_loss
# loss_a += adv_loss
loss_d += diff_loss
loss_total += total_loss
num += x.size(0)
res['loss_t'], res['acc_t'] = loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_d'] = loss_d.item() / (batch + 1)
res['loss_tot'] = loss_total.item() / (batch + 1)
res['size'] = self.loader_size(data_loader)
return res
#
class DiffLoss(torch.nn.Module):
# From: Domain Separation Networks (https://arxiv.org/abs/1608.06019)
# Konstantinos Bousmalis, George Trigeorgis, Nathan Silberman, Dilip Krishnan, Dumitru Erhan
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, D1, D2):
D1=D1.view(D1.size(0), -1)
D1_norm=torch.norm(D1, p=2, dim=1, keepdim=True).detach()
D1_norm=D1.div(D1_norm.expand_as(D1) + 1e-6)
D2=D2.view(D2.size(0), -1)
D2_norm=torch.norm(D2, p=2, dim=1, keepdim=True).detach()
D2_norm=D2.div(D2_norm.expand_as(D2) + 1e-6)
return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
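# Hedged usage sketch (added comment, not part of the original file): DiffLoss row-normalises
# both feature batches (the per-row norms are detached, so gradients only flow through the raw
# features), and D1_norm.mm(D2_norm.t())[i, j] is the cosine similarity between shared feature i
# and private feature j. The loss is the mean squared cosine over all pairs, which is zero only
# when the two feature sets are mutually orthogonal:
#   criterion = DiffLoss()
#   a = torch.tensor([[1., 0., 0.], [0., 1., 0.]])
#   b = torch.tensor([[0., 0., 1.], [0., 0., 1.]])
#   criterion(a, b)  # tensor(0.) -- every row of a is orthogonal to every row of b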
| Adversarial-Continual-Learning-main | ACL-resnet/src/acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from copy import deepcopy
import pickle
import time
import uuid
from subprocess import call
########################################################################################################################
def human_format(num):
magnitude=0
while abs(num)>=1000:
magnitude+=1
num/=1000.0
return '%.1f%s'%(num,['','K','M','G','T','P'][magnitude])
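# Quick examples (added comment):
#   human_format(1234567)  -> '1.2M'
#   human_format(950)      -> '950.0'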
def report_tr(res, e, sbatch, clock0, clock1):
# Training performance
print(
'| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train losses={:.3f} | T: loss={:.3f}, acc={:5.2f}% | D: loss={:.3f}, acc={:5.1f}%, '
'Diff loss:{:.3f} |'.format(
e + 1,
1000 * sbatch * (clock1 - clock0) / res['size'],
1000 * sbatch * (time.time() - clock1) / res['size'], res['loss_tot'],
res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
def report_val(res):
# Validation performance
print(' Valid losses={:.3f} | T: loss={:.6f}, acc={:5.2f}%, | D: loss={:.3f}, acc={:5.2f}%, Diff loss={:.3f} |'.format(
res['loss_tot'], res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
########################################################################################################################
def get_model(model):
return deepcopy(model.state_dict())
########################################################################################################################
def compute_conv_output_size(Lin,kernel_size,stride=1,padding=0,dilation=1):
return int(np.floor((Lin+2*padding-dilation*(kernel_size-1)-1)/float(stride)+1))
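# Worked examples (added comment), using the defaults stride=1, padding=0, dilation=1:
#   compute_conv_output_size(32, 4)  -> floor((32 - 3 - 1)/1) + 1 = 29
#   compute_conv_output_size(14, 3)  -> 12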
########################################################################################################################
def save_print_log(taskcla, acc, lss, output_path):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
print ('ACC: {:5.4f}%'.format((np.mean(acc[acc.shape[0]-1,:]))))
print()
print ('BWD Transfer = ')
print ()
print ("Diagonal R_ii")
for i in range(acc.shape[0]):
print('\t',end='')
print('{:5.2f}% '.format(np.diag(acc)[i]), end=',')
print()
print ("Last row")
for i in range(acc.shape[0]):
print('\t', end=',')
print('{:5.2f}% '.format(acc[-1][i]), end=',')
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on our UCB paper (https://openreview.net/pdf?id=HklUCCVKDB)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
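# Illustrative example (added comment): acc[i, j] is the accuracy on task j after training
# task i, so np.diag(acc)[j] is the accuracy right after learning task j and acc[-1] is the
# accuracy after the final task. With 3 tasks, np.diag(acc) = [80, 75, 70] and
# acc[-1] = [76, 73, 70] give gem_bwt = ((76-80) + (73-75) + (70-70)) / 2 = -3.0, i.e. three
# points of average forgetting; ucb_bwt averages the same differences over all 3 tasks (-2.0).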
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
with open(os.path.join(output_path, 'logs.p'), 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", os.path.join(output_path, 'logs.p'))
def print_log_acc_bwt(taskcla, acc, lss, output_path, run_id):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
avg_acc = np.mean(acc[acc.shape[0]-1,:])
print ('ACC: {:5.4f}%'.format(avg_acc))
print()
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on UCB paper (https://arxiv.org/abs/1906.02425)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
path = os.path.join(output_path, 'logs_run_id_{}.p'.format(run_id))
with open(path, 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", path)
return avg_acc, gem_bwt
def print_running_acc_bwt(acc, task_num):
print()
acc = acc[:task_num+1,:task_num+1]
avg_acc = np.mean(acc[acc.shape[0] - 1, :])
gem_bwt = sum(acc[-1] - np.diag(acc)) / (len(acc[-1]) - 1)
print('ACC: {:5.4f}% || BWT: {:5.2f}% '.format(avg_acc, gem_bwt))
print()
def make_directories(args):
uid = uuid.uuid4().hex
if args.checkpoint is None:
os.makedirs('checkpoints', exist_ok=True)
args.checkpoint = os.path.join('./checkpoints/',uid)
os.mkdir(args.checkpoint)
else:
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
args.checkpoint = os.path.join(args.checkpoint, uid)
os.mkdir(args.checkpoint)
def some_sanity_checks(args):
# Making sure the chosen experiment matches with the number of tasks performed in the paper:
datasets_tasks = {}
datasets_tasks['mnist5']=[5]
datasets_tasks['pmnist']=[10,20,30,40]
datasets_tasks['cifar100']=[20]
datasets_tasks['miniimagenet']=[20]
datasets_tasks['multidatasets']=[5]
if args.ntasks not in datasets_tasks[args.experiment]:
raise Exception("Chosen number of tasks ({}) does not match the {} experiment".format(args.ntasks, args.experiment))
# Making sure the memory flags are consistent:
if args.use_memory == 'yes' and not args.samples > 0:
raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
if args.use_memory == 'no' and args.samples > 0:
raise Exception("Inconsistent flags: with --use_memory no, --samples must be 0")
def save_code(args):
cwd = os.getcwd()
des = os.path.join(args.checkpoint, 'code') + '/'
if not os.path.exists(des):
os.mkdir(des)
def get_folder(folder):
return os.path.join(cwd,folder)
folders = [get_folder(item) for item in ['dataloaders', 'networks', 'configs', 'main.py', 'acl.py', 'utils.py']]
for folder in folders:
call('cp -rf {} {}'.format(folder, des),shell=True)
def print_time():
from datetime import datetime
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("Job finished at =", dt_string)
| Adversarial-Continual-Learning-main | ACL-resnet/src/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import utils
tstart=time.time()
# Arguments
parser = argparse.ArgumentParser(description='Adversarial Continual Learning...')
# Load the config file
parser.add_argument('--config', type=str, default='./configs/config_mnist5.yml')
flags = parser.parse_args()
args = OmegaConf.load(flags.config)
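# Hypothetical config sketch (added comment): the YAML loaded above is expected to provide the
# attributes accessed throughout this repo (args.experiment, args.ntasks, args.device,
# args.num_runs, args.checkpoint, args.use_memory, args.samples, ...). The keys below are
# inferred from those accesses, not copied from the shipped configs/*.yml, which may differ:
#   experiment: mnist5
#   ntasks: 5
#   num_runs: 1
#   device: cuda:0
#   checkpoint: ./checkpoints/
#   data_dir: ./data/
#   use_memory: 'no'
#   samples: 0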
print()
########################################################################################################################
# Args -- Experiment
if args.experiment=='pmnist':
from dataloaders import pmnist as datagenerator
elif args.experiment=='mnist5':
from dataloaders import mnist5 as datagenerator
elif args.experiment=='cifar100':
from dataloaders import cifar100 as datagenerator
elif args.experiment=='miniimagenet':
from dataloaders import miniimagenet as datagenerator
elif args.experiment=='multidatasets':
from dataloaders import mulitidatasets as datagenerator
else:
raise NotImplementedError
from acl import ACL as approach
# Args -- Network
if args.experiment == 'mnist5' or args.experiment == 'pmnist':
from networks import mlp_acl as network
elif args.experiment == 'cifar100' or args.experiment == 'miniimagenet' or args.experiment == 'multidatasets':
if args.arch == 'alexnet':
from networks import alexnet_acl as network
elif args.arch == 'resnet':
from networks import resnet_acl as network
else:
raise NotImplementedError
else:
raise NotImplementedError
########################################################################################################################
def run(args, run_id):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
# Faster run but not deterministic:
# torch.backends.cudnn.benchmark = True
# To get deterministic results that match with paper at cost of lower speed:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Data loader
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
# Model
net = network.Net(args)
net = net.to(args.device)
net.print_model_size()
# print (net)
# Approach
appr=approach(net,args,network=network)
# Loop tasks
acc=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
lss=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Train
appr.train(t,dataset[t])
print('-'*250)
print()
for u in range(t+1):
# Load previous model and replace the shared module with the current one
test_model = appr.load_model(u)
test_res = appr.test(dataset[u]['test'], u, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, dataset[u]['name'],
test_res['loss_t'],
test_res['acc_t']))
acc[t, u] = test_res['acc_t']
lss[t, u] = test_res['loss_t']
# Save
print()
print('Saved accuracies at '+os.path.join(args.checkpoint,args.output))
np.savetxt(os.path.join(args.checkpoint,args.output),acc,'%.6f')
# Extract embeddings to plot in tensorboard for miniimagenet
if args.tsne == 'yes' and args.experiment == 'miniimagenet':
appr.get_tsne_embeddings_first_ten_tasks(dataset, model=appr.load_model(t))
appr.get_tsne_embeddings_last_three_tasks(dataset, model=appr.load_model(t))
avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla, acc, lss, output_path=args.checkpoint, run_id=run_id)
return avg_acc, gem_bwt
#######################################################################################################################
def main(args):
utils.make_directories(args)
utils.some_sanity_checks(args)
utils.save_code(args)
print('=' * 100)
print('Arguments =')
for arg in vars(args):
print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
accuracies, forgetting = [], []
for n in range(args.num_runs):
args.seed = n
args.output = '{}_{}_tasks_seed_{}.txt'.format(args.experiment, args.ntasks, args.seed)
print ("args.output: ", args.output)
print (" >>>> Run #", n)
acc, bwt = run(args, n)
accuracies.append(acc)
forgetting.append(bwt)
print('*' * 100)
print ("Average over {} runs: ".format(args.num_runs))
print ('AVG ACC: {:5.4f}% \\pm {:5.4f}'.format(np.array(accuracies).mean(), np.array(accuracies).std()))
print ('AVG BWT: {:5.2f}% \\pm {:5.4f}'.format(np.array(forgetting).mean(), np.array(forgetting).std()))
print ("All Done! ")
print('[Elapsed time = {:.1f} min]'.format((time.time()-tstart)/(60)))
utils.print_time()
def test_trained_model(args, final_model_id):
args.seed = 0
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
def get_model(final_model_id, test_data_id):
# Load the test model
test_net = network.Net(args)
checkpoint_test = torch.load(os.path.join(args.checkpoint, 'model_{}.pth.tar'.format(test_data_id)))
test_net.load_state_dict(checkpoint_test['model_state_dict'])
# Load your final trained model
net = network.Net(args)
checkpoint = torch.load(os.path.join(args.checkpoint, 'model_{}.pth.tar'.format(final_model_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# Replace the test model's shared module with the final model's shared module
final_shared = deepcopy(net.shared.state_dict())
test_net.shared.load_state_dict(final_shared)
test_net = test_net.to(args.device)
return test_net
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Model
test_model = get_model(final_model_id, test_data_id=t)
# Approach
appr = approach(test_model, args, network=network)
# Test
test_res = appr.inference(dataset[t]['test'], t, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.4f}% <<<'.format(t, dataset[t]['name'],
test_res['loss_t'],
test_res['acc_t']))
#######################################################################################################################
if __name__ == '__main__':
main(args)
# test_trained_model(args, final_model_id=4) | Adversarial-Continual-Learning-main | ACL-resnet/src/main.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from utils import *
class iCIFAR10(datasets.CIFAR10):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iCIFAR10, self).__init__(root, transform=transform,
target_transform=target_transform, download=True)
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
if self.train:
train_data = []
train_labels = []
train_tt = [] # task module labels
train_td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
train_data.append(self.data[i])
train_labels.append(self.class_mapping[self.targets[i]])
train_tt.append(task_num)
train_td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
train_data.append(memory[task_id]['x'][i])
train_labels.append(memory[task_id]['y'][i])
train_tt.append(memory[task_id]['tt'][i])
train_td.append(memory[task_id]['td'][i])
self.train_data = np.array(train_data)
self.train_labels = train_labels
self.train_tt = train_tt
self.train_td = train_td
if not self.train:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
test_data = []
test_labels = []
test_tt = [] # task module labels
test_td = [] # discriminator labels
for i in range(len(self.test_data)):
if self.test_labels[i] in classes:
test_data.append(self.test_data[i])
test_labels.append(self.class_mapping[self.test_labels[i]])
test_tt.append(task_num)
test_td.append(task_num + 1)
self.class_indices[self.class_mapping[self.test_labels[i]]].append(i)
self.test_data = np.array(test_data)
self.test_labels = test_labels
self.test_tt = test_tt
self.test_td = test_td
def __getitem__(self, index):
if self.train:
img, target, tt, td = self.train_data[index], self.train_labels[index], self.train_tt[index], self.train_td[index]
else:
img, target, tt, td = self.test_data[index], self.test_labels[index], self.test_tt[index], self.test_td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None:
img = self.transform(img)
except:
pass
try:
if self.target_transform is not None:
target = self.target_transform(target)
except:
pass
return img, target, tt, td
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
class iCIFAR100(iCIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,32,32]
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
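# Illustrative example (added comment): with 100 classes and ntasks=20 this permutes the
# class ids and splits them into 20 disjoint lists of 5 global labels, e.g.
#   np.split(np.random.permutation(100), 20)  -> 20 arrays of shape (5,)
# Each iCIFAR100 task then remaps its 5 global labels to 0..4 via class_mapping.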
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True, download=True, transform=self.transformation)
self.test_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'CIFAR100-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
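# Budget example (added comment): num_samples_per_class = args.samples // classes_per_task,
# so e.g. --samples 25 with 5 classes per CIFAR-100 task keeps 5 images per class
# (25 per task), all drawn at random from that task's training split.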
| Adversarial-Continual-Learning-main | ACL-resnet/src/dataloaders/cifar100.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/pytorch/vision/blob/8635be94d1216f10fb8302da89233bd86445e449/torchvision/datasets/utils.py
import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import numpy as np
import torch
import codecs
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
else:
raise e
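# Usage sketch (added comment): for example, fetching the CIFAR-100 archive referenced
# elsewhere in this repo into ./data, skipping the md5 check:
#   download_url("https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz", root="./data")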
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
def iterable_to_str(iterable):
return "'" + "', '".join([str(item) for item in iterable]) + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
if not isinstance(path, torch._six.string_classes):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
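# Worked example (added comment): the 4-byte magic encodes the element type and the number
# of dimensions. The MNIST image file starts with 2051 (0x00000803), so nd = 2051 % 256 = 3
# and ty = 2051 // 256 = 8 (uint8); the next three big-endian 4-byte ints are the dimensions
# (e.g. 60000, 28, 28) and the remaining bytes are the raw pixel buffer.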
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 3)
return x | Adversarial-Continual-Learning-main | ACL-resnet/src/dataloaders/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import transforms
from utils import *
class MiniImageNet(torch.utils.data.Dataset):
def __init__(self, root, train):
super(MiniImageNet, self).__init__()
if train:
self.name='train'
else:
self.name='test'
root = os.path.join(root, 'miniimagenet')
with open(os.path.join(root,'{}.pkl'.format(self.name)), 'rb') as f:
data_dict = pickle.load(f)
self.data = data_dict['images']
self.labels = data_dict['labels']
def __len__(self):
return len(self.data)
def __getitem__(self, i):
img, label = self.data[i], self.labels[i]
return img, label
class iMiniImageNet(MiniImageNet):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None):
super(iMiniImageNet, self).__init__(root=root, train=train)
self.transform = transform
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
labels = []
tt = [] # task module labels
td = [] # discriminator labels
for i in range(len(self.data)):
if self.labels[i] in classes:
data.append(self.data[i])
labels.append(self.class_mapping[self.labels[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.labels[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
labels.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = np.array(data)
self.labels = labels
self.tt = tt
self.td = td
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.labels[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if not torch.is_tensor(img):
img = Image.fromarray(img)
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.use_memory = args.use_memory
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,84,84]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.transformation = transforms.Compose([
transforms.Resize((84,84)),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id],
memory_classes=memory_classes, memory=memory,
task_num=task_id, train=True, transform=self.transformation)
self.test_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'iMiniImageNet-{}-{}'.format(task_id,self.task_ids[task_id])
self.dataloaders[task_id]['tsne'] = torch.utils.data.DataLoader(self.test_set[task_id],
batch_size=len(test_loader.dataset),
num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
print ("Task ID: ", task_id)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
for i in range(len(self.task_ids[task_id])):
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class] # randomly sample some data
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x']))) | Adversarial-Continual-Learning-main | ACL-resnet/src/dataloaders/miniimagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os.path
import sys
import warnings
import urllib.request
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from .utils import *
# from scipy.imageio import imread
import pandas as pd
import os
import torch
from PIL import Image
import scipy.io as sio
from collections import defaultdict
from itertools import chain
from collections import OrderedDict
class CIFAR10_(datasets.CIFAR10):
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
num_classes = 10
def __init__(self, root, task_num, num_samples_per_class, train, transform, target_transform, download=True):
# root, task_num, train, transform = None, download = False):
super(CIFAR10_, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
if not num_samples_per_class:
self.data = []
self.targets = []
# now load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
else:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
self._load_meta()
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
# if self.train:
return len(self.data)
# else:
# return len(self.test_data)
def report_size(self):
print("CIFAR10 size at train={} time: {} ".format(self.train,self.__len__()))
def _load_meta(self):
path = os.path.join(self.root, self.base_folder, self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError('Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
if sys.version_info[0] == 2:
data = pickle.load(infile)
else:
data = pickle.load(infile, encoding='latin1')
self.classes = data[self.meta['key']]
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
class CIFAR100_(CIFAR10_):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
num_classes = 100
class SVHN_(torch.utils.data.Dataset):
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]}
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=True):
self.root = os.path.expanduser(root)
# root, task_num, train, transform = None, download = False):
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if self.train:
split="train"
else:
split="test"
self.num_classes = 10
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat['X']
# loading from the .mat file gives an np array of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.targets = loaded_mat['y'].astype(np.int64).squeeze()
self.data = np.transpose(self.data, (3, 2, 0, 1))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes+1):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = np.array(sum(y,[])).astype(np.int64)
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.targets, self.targets == 10, 0)
# print ("svhn: ", self.data.shape)
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self):
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
class MNIST_RGB(datasets.MNIST):
def __init__(self, root, task_num, num_samples_per_class, train=True, transform=None, target_transform=None, download=False):
super(MNIST_RGB, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
self.train = train # training set or test set
self.target_transform=target_transform
self.transform=transform
self.num_classes=10
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
# self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img, mode='L').convert('RGB')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class FashionMNIST_(MNIST_RGB):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class notMNIST_(torch.utils.data.Dataset):
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform=target_transform
self.train = train
self.url = "https://github.com/facebookresearch/Adversarial-Continual-Learning/raw/master/data/notMNIST.zip"
self.filename = 'notMNIST.zip'
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
download_url(self.url, root, filename=self.filename)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
if self.train:
fpath = os.path.join(root, 'notMNIST', 'Train')
else:
fpath = os.path.join(root, 'notMNIST', 'Test')
X, Y = [], []
folders = os.listdir(fpath)
for folder in folders:
folder_path = os.path.join(fpath, folder)
for ims in os.listdir(folder_path):
try:
img_path = os.path.join(folder_path, ims)
X.append(np.array(Image.open(img_path).convert('RGB')))
Y.append(ord(folder) - 65) # Folders are A-J so labels will be 0-9
except:
print("File {}/{} is broken".format(folder, ims))
self.data = np.array(X)
self.targets = Y
self.num_classes = len(set(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# If we need a subset of the dataset with num_samples_per_class we use this and then concatenate it with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.labels = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
img = Image.fromarray(img)#.convert('RGB')
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
def download(self):
"""Download the notMNIST data if it doesn't exist in processed_folder already."""
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
| Adversarial-Continual-Learning-main | ACL-resnet/src/dataloaders/datasets_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
if args.experiment == 'cifar100':
hiddens = [64, 128, 256, 1024, 1024, 512]
elif args.experiment == 'miniimagenet':
hiddens = [64, 128, 256, 512, 512, 512]
# ----------------------------------
elif args.experiment == 'multidatasets':
hiddens = [64, 128, 256, 1024, 1024, 512]
else:
raise NotImplementedError
self.conv1=torch.nn.Conv2d(self.ncha,hiddens[0],kernel_size=size//8)
s=utils.compute_conv_output_size(size,size//8)
s=s//2
self.conv2=torch.nn.Conv2d(hiddens[0],hiddens[1],kernel_size=size//10)
s=utils.compute_conv_output_size(s,size//10)
s=s//2
self.conv3=torch.nn.Conv2d(hiddens[1],hiddens[2],kernel_size=2)
s=utils.compute_conv_output_size(s,2)
s=s//2
self.maxpool=torch.nn.MaxPool2d(2)
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(hiddens[2]*s*s,hiddens[3])
self.fc2=torch.nn.Linear(hiddens[3],hiddens[4])
self.fc3=torch.nn.Linear(hiddens[4],hiddens[5])
self.fc4=torch.nn.Linear(hiddens[5], self.latent_dim)
def forward(self, x_s):
x_s = x_s.view_as(x_s)
h = self.maxpool(self.drop1(self.relu(self.conv1(x_s))))
h = self.maxpool(self.drop1(self.relu(self.conv2(h))))
h = self.maxpool(self.drop2(self.relu(self.conv3(h))))
h = h.view(x_s.size(0), -1)
h = self.drop2(self.relu(self.fc1(h)))
h = self.drop2(self.relu(self.fc2(h)))
h = self.drop2(self.relu(self.fc3(h)))
h = self.drop2(self.relu(self.fc4(h)))
return h
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'cifar100':
hiddens=[32,32]
flatten=1152
elif args.experiment == 'miniimagenet':
# hiddens=[8,8]
# flatten=1800
hiddens=[16,16]
flatten=3600
elif args.experiment == 'multidatasets':
hiddens=[32,32]
flatten=1152
else:
raise NotImplementedError
self.task_out = torch.nn.Sequential()
self.task_out.add_module('conv1', torch.nn.Conv2d(self.ncha, hiddens[0], kernel_size=self.size // 8))
self.task_out.add_module('relu1', torch.nn.ReLU(inplace=True))
self.task_out.add_module('drop1', torch.nn.Dropout(0.2))
self.task_out.add_module('maxpool1', torch.nn.MaxPool2d(2))
self.task_out.add_module('conv2', torch.nn.Conv2d(hiddens[0], hiddens[1], kernel_size=self.size // 10))
self.task_out.add_module('relu2', torch.nn.ReLU(inplace=True))
self.task_out.add_module('dropout2', torch.nn.Dropout(0.5))
self.task_out.add_module('maxpool2', torch.nn.MaxPool2d(2))
self.linear = torch.nn.Sequential()
self.linear.add_module('linear1', torch.nn.Linear(flatten, self.latent_dim))
self.linear.add_module('relu3', torch.nn.ReLU(inplace=True))
def forward(self, x):
x = x.view_as(x)
out = self.task_out(x)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# def forward(self, x, task_id):
# x = x.view_as(x)
# out = self.task_out[2*task_id].forward(x)
# out = out.view(out.size(0),-1)
# out = self.task_out[2*task_id+1].forward(out)
# return out
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.ntasks = args.ntasks
self.samples = args.samples
self.image_size = self.ncha*size*size
self.args=args
self.hidden1 = args.head_units
self.hidden2 = args.head_units//2
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[0][1])
)
def forward(self, x_s, x_p, tt=None):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
# x_s = self.shared(x_s)
# x_p = self.private(x_p)
#
# x = torch.cat([x_p, x_s], dim=1)
# if self.args.experiment == 'multidatasets':
# # if no memory is used this is faster:
# y=[]
# for i,_ in self.taskcla:
# y.append(self.head[i](x))
# return y[task_id]
# else:
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
# if torch.is_tensor(tt):
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
# else:
# return self.head(x)
output = {}
output['shared'] = self.shared(x_s)
output['private'] = self.private(x_p)
concat_features = torch.cat([output['private'], output['shared']], dim=1)
if torch.is_tensor(tt):
output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(
concat_features.size(0))])
else:
output['out'] = self.head(concat_features)
return output
# def get_encoded_ftrs(self, x_s, x_p, task_id=None):
# return self.shared(x_s), self.private(x_p)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print("Size of the network for one task including (S+P+p)")
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s ' % (self.pretty_print(count_P)))
print('Num parameters in p = %s ' % (self.pretty_print(count_H)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P + count_H))
print('--------------------------> Architecture size in total for all tasks: %s parameters (%sB)' % (
self.pretty_print(count_S + self.ntasks*count_P + self.ntasks*count_H),
self.pretty_print(4 * (count_S + self.ntasks*count_P + self.ntasks*count_H))))
classes_per_task = self.taskcla[0][1]
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples*classes_per_task,
self.pretty_print(
self.ntasks * 4 * self.samples * classes_per_task* self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(
4 * (count_S + self.ntasks *count_P + self.ntasks *count_H) + self.ntasks * 4 * self.samples * classes_per_task * self.image_size))
def pretty_print(self, num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
| Adversarial-Continual-Learning-main | ACL-resnet/src/networks/alexnet_acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
ncha,size,_ = args.inputsize
self.pretrained = False
if args.experiment == 'cifar100':
hiddens = [64, 128, 256]
elif args.experiment == 'miniimagenet':
hiddens = [1024, 512, 256]
else:
raise NotImplementedError
# Small resnet
resnet = resnet18_small(self.latent_dim, shared=True)
self.features = torch.nn.Sequential(*list(resnet.children())[:-2])
if args.experiment == 'miniimagenet':
# num_ftrs = 4608
num_ftrs = 2304 # without average pool (-2)
elif args.experiment == 'cifar100':
# num_ftrs = 25088 # without average pool
num_ftrs = 256
else:
raise NotImplementedError
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(num_ftrs,hiddens[0])
self.fc2=torch.nn.Linear(hiddens[0],hiddens[1])
self.fc3=torch.nn.Linear(hiddens[1],hiddens[1])
self.fc4=torch.nn.Linear(hiddens[1], self.latent_dim)
def forward(self, x):
x = x.view_as(x)
x = self.features(x)
x = torch.flatten(x, 1)
x = self.drop2(self.relu(self.fc1(x)))
x = self.drop2(self.relu(self.fc2(x)))
x = self.drop2(self.relu(self.fc3(x)))
x = self.drop2(self.relu(self.fc4(x)))
return x
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.image_size = ncha * size * size
self.taskcla = args.taskcla
self.latent_dim = args.latent_dim
self.ntasks = args.ntasks
self.samples = args.samples
self.image_size = ncha * size * size
self.use_memory = args.use_memory
self.hidden1 = args.head_units
self.hidden2 = args.head_units
self.shared = Shared(args)
self.private = resnet18_small(self.latent_dim, shared=False)
self.head = torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[0][1])
)
def forward(self, x_s, x_p, tt=None):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
# x_s = self.shared(x_s)
# x_p = self.private(x_p)
# x = torch.cat([x_p, x_s], dim=1)
# if self.args.experiment == 'multidatasets':
# # if no memory is used this is faster:
# y=[]
# for i,_ in self.taskcla:
# y.append(self.head[i](x))
# return y[task_id]
# else:
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
output = {}
output['shared'] = self.shared(x_s)
output['private'] = self.private(x_p)
concat_features = torch.cat([output['private'], output['shared']], dim=1)
if torch.is_tensor(tt):
output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(
concat_features.size(0))])
else:
output['out'] = self.head(concat_features)
return output
# output['shared'] = self.shared(x_s)
# output['private'] = self.private(x_p)
#
# concat_features = torch.cat([output['private'], output['shared']], dim=1)
#
# if torch.is_tensor(tt):
#
# output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(concat_features.size(0))])
# else:
# if self.use_memory == 'no':
# output['out'] = self.head.forward(concat_features)
#
# elif self.use_memory == 'yes':
# y = []
# for i, _ in self.taskcla:
# y.append(self.head[i](concat_features))
# output['out'] = y[task_id]
#
# return output
# def get_encoded_ftrs(self, x_s, x_p, task_id=None):
# return self.shared(x_s), self.private(x_p)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print("Size of the network for one task including (S+P+p)")
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s ' % (self.pretty_print(count_P)))
print('Num parameters in p = %s ' % (self.pretty_print(count_H)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P + count_H))
print('--------------------------> Architecture size in total for all tasks: %s parameters (%sB)' % (
self.pretty_print(count_S + self.ntasks*count_P + self.ntasks*count_H),
self.pretty_print(4 * (count_S + self.ntasks*count_P + self.ntasks*count_H))))
classes_per_task = self.taskcla[0][1]
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples*classes_per_task,
self.pretty_print(
self.ntasks * 4 * self.samples * classes_per_task* self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(
4 * (count_S + self.ntasks *count_P + self.ntasks *count_H) + self.ntasks * 4 * self.samples * classes_per_task * self.image_size))
def pretty_print(self, num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
class _CustomDataParallel(torch.nn.DataParallel):
def __init__(self, model):
super(_CustomDataParallel, self).__init__(model)
def __getattr__(self, name):
try:
return super(_CustomDataParallel, self).__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, shared, block, layers, num_classes, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
# small resnet
if shared:
hiddens = [32, 64, 128, 256]
else:
hiddens = [16, 32, 32, 64]
# original resnet
# hiddens = [64, 128, 256, 512]
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, hiddens[0], layers[0])
self.layer2 = self._make_layer(block, hiddens[1], layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, hiddens[2], layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, hiddens[3], layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(hiddens[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
x = self.relu(x)
return x
def forward(self, x):
return self._forward_impl(x)
def resnet18_small(latent_dim, shared):
    r"""ResNet-18 (small) model, following
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_"""
    return ResNet(shared, BasicBlock, [2, 2, 2, 2], num_classes=latent_dim)
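# --- Illustrative shape check (added as an example; not part of the original file) ---
# The small ResNet-18 above maps a batch of 3-channel images to `latent_dim`
# features; the 32x32 input resolution below is only an assumption for the demo.
if __name__ == "__main__":
    _net = resnet18_small(64, shared=False)
    _net.eval()
    _out = _net(torch.randn(2, 3, 32, 32))
    print(_out.shape)  # torch.Size([2, 64])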
| Adversarial-Continual-Learning-main | ACL-resnet/src/networks/resnet_acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.nhid = args.units
self.device = args.device
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.linear = torch.nn.Sequential()
self.linear.add_module('linear', torch.nn.Linear(self.ncha*self.size*self.size, self.latent_dim))
self.linear.add_module('relu', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x_p, task_id):
x_p = x_p.view(x_p.size(0), -1)
return self.task_out[task_id].forward(x_p)
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.nhid = args.units
self.nlayers = args.nlayers
self.relu=torch.nn.ReLU()
self.drop=torch.nn.Dropout(0.2)
self.fc1=torch.nn.Linear(ncha*self.size*self.size, self.nhid)
if self.nlayers == 3:
self.fc2 = torch.nn.Linear(self.nhid, self.nhid)
self.fc3=torch.nn.Linear(self.nhid,self.latent_dim)
else:
self.fc2 = torch.nn.Linear(self.nhid,self.latent_dim)
def forward(self, x_s):
h = x_s.view(x_s.size(0), -1)
h = self.drop(self.relu(self.fc1(h)))
h = self.drop(self.relu(self.fc2(h)))
if self.nlayers == 3:
h = self.drop(self.relu(self.fc3(h)))
return h
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'mnist5':
self.hidden1 = 28
self.hidden2 = 14
elif args.experiment == 'pmnist':
self.hidden1 = 28
self.hidden2 = 28
self.samples = args.samples
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2 * self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self,x_s, x_p, tt, task_id):
h_s = x_s.view(x_s.size(0), -1)
        h_p = x_p.view(x_p.size(0), -1)
x_s = self.shared(h_s)
x_p = self.private(h_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Total architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
| Adversarial-Continual-Learning-main | ACL-resnet/src/networks/mlp_acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Discriminator(torch.nn.Module):
def __init__(self,args,task_id):
super(Discriminator, self).__init__()
self.num_tasks=args.ntasks
self.units=args.units
self.latent_dim=args.latent_dim
if args.diff == 'yes':
self.dis = torch.nn.Sequential(
GradientReversal(args.lam),
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
else:
self.dis = torch.nn.Sequential(
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
def forward(self, z):
return self.dis(z)
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def get_size(self):
count=sum(p.numel() for p in self.dis.parameters() if p.requires_grad)
print('Num parameters in D = %s ' % (self.pretty_print(count)))
class GradientReversalFunction(torch.autograd.Function):
"""
From:
https://github.com/jvanvugt/pytorch-domain-adaptation/blob/cb65581f20b71ff9883dd2435b2275a1fd4b90df/utils.py#L26
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
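# --- Illustrative usage (added as an example; not part of the original file) ---
# A minimal sketch of the behaviour described in the docstring above: the
# forward pass is the identity, while the backward pass multiplies incoming
# gradients by -lambda, which is what lets the shared encoder be trained
# adversarially against the task discriminator.
if __name__ == "__main__":
    _x = torch.ones(3, requires_grad=True)
    _y = GradientReversalFunction.apply(_x, 0.5)
    _y.sum().backward()
    assert torch.allclose(_y, _x)                              # identity forward
    assert torch.allclose(_x.grad, torch.full_like(_x, -0.5))  # reversed, scaled gradient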
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_) | Adversarial-Continual-Learning-main | ACL-resnet/src/networks/discriminator.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
np.random.seed(1234)
# we want 500 images for training and 100 for test for each class
n = 500
def get_total(data):
data_x, data_y = [], []
for k, v in data.items():
for i in range(len(v)):
data_x.append(v[i])
data_y.append(k)
d = {}
d['images'] = data_x
d['labels'] = data_y
return d
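# Small illustrative check (added as an example; toy inputs): get_total flattens
# a {label: [images]} dict into parallel 'images' and 'labels' lists.
_example = get_total({0: ['img_a', 'img_b'], 1: ['img_c']})
assert _example['images'] == ['img_a', 'img_b', 'img_c']
assert _example['labels'] == [0, 0, 1]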
# loading the pickled data
with open(os.path.join('../data/miniimagenet/data.pkl'), 'rb') as f:
data_dict = pickle.load(f)
data = data_dict['images']
labels = data_dict['labels']
# split data into classes, 600 images per class
class_dict = {}
for i in range(len(set(labels))):
class_dict[i] = []
for i in range(len(data)):
class_dict[labels[i]].append(data[i])
# Split data for each class to 500 and 100
x_train, x_test = {}, {}
for i in range(len(set(labels))):
np.random.shuffle(class_dict[i])
x_test[i] = class_dict[i][n:]
x_train[i] = class_dict[i][:n]
# mix the data
d_train = get_total(x_train)
d_test = get_total(x_test)
with open(os.path.join('../data/miniimagenet/train.pkl'), 'wb') as f:
pickle.dump(d_train, f)
with open(os.path.join('../data/miniimagenet/test.pkl'), 'wb') as f:
pickle.dump(d_test, f) | Adversarial-Continual-Learning-main | data/split_miniimagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, time, os
import numpy as np
import torch
import copy
import utils
from copy import deepcopy
from tqdm import tqdm
sys.path.append('../')
from networks.discriminator import Discriminator
class ACL(object):
def __init__(self, model, args, network):
self.args=args
self.nepochs=args.nepochs
self.sbatch=args.batch_size
# optimizer & adaptive lr
self.e_lr=args.e_lr
self.d_lr=args.d_lr
if not args.experiment == 'multidatasets':
self.e_lr=[args.e_lr] * args.ntasks
self.d_lr=[args.d_lr] * args.ntasks
else:
self.e_lr = [self.args.lrs[i][1] for i in range(len(args.lrs))]
self.d_lr = [self.args.lrs[i][1]/10. for i in range(len(args.lrs))]
print ("d_lrs : ", self.d_lr)
self.lr_min=args.lr_min
self.lr_factor=args.lr_factor
self.lr_patience=args.lr_patience
self.samples=args.samples
self.device=args.device
self.checkpoint=args.checkpoint
self.adv_loss_reg=args.adv
self.diff_loss_reg=args.orth
self.s_steps=args.s_step
self.d_steps=args.d_step
self.diff=args.diff
self.network=network
self.inputsize=args.inputsize
self.taskcla=args.taskcla
self.num_tasks=args.ntasks
# Initialize generator and discriminator
self.model=model
self.discriminator=self.get_discriminator(0)
self.discriminator.get_size()
self.latent_dim=args.latent_dim
self.task_loss=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_d=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_s=torch.nn.CrossEntropyLoss().to(self.device)
self.diff_loss=DiffLoss().to(self.device)
self.optimizer_S=self.get_S_optimizer(0)
self.optimizer_D=self.get_D_optimizer(0)
self.task_encoded={}
self.mu=0.0
self.sigma=1.0
print()
def get_discriminator(self, task_id):
discriminator=Discriminator(self.args, task_id).to(self.args.device)
return discriminator
def get_S_optimizer(self, task_id, e_lr=None):
if e_lr is None: e_lr=self.e_lr[task_id]
optimizer_S=torch.optim.SGD(self.model.parameters(), momentum=self.args.mom,
weight_decay=self.args.e_wd, lr=e_lr)
return optimizer_S
def get_D_optimizer(self, task_id, d_lr=None):
if d_lr is None: d_lr=self.d_lr[task_id]
optimizer_D=torch.optim.SGD(self.discriminator.parameters(), weight_decay=self.args.d_wd, lr=d_lr)
return optimizer_D
def train(self, task_id, dataset):
self.discriminator=self.get_discriminator(task_id)
best_loss=np.inf
best_model=utils.get_model(self.model)
best_loss_d=np.inf
best_model_d=utils.get_model(self.discriminator)
dis_lr_update=True
d_lr=self.d_lr[task_id]
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
e_lr=self.e_lr[task_id]
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
for e in range(self.nepochs):
# Train
clock0=time.time()
self.train_epoch(dataset['train'], task_id)
clock1=time.time()
train_res=self.eval_(dataset['train'], task_id)
utils.report_tr(train_res, e, self.sbatch, clock0, clock1)
            # Lower the learning rates and re-initialize the model/discriminator if accuracy is still at random chance after the first 5 epochs
if (self.args.experiment == 'cifar100' or self.args.experiment == 'miniimagenet') and e == 4:
random_chance=20.
threshold=random_chance + 2
if train_res['acc_t'] < threshold:
                    # Lower both learning rates, re-initialize D, and reload the previous checkpoint (or re-initialize the model for the first task)
d_lr=self.d_lr[task_id] / 10.
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print("Performance on task {} is {} so Dis's lr is decreased to {}".format(task_id, train_res[
'acc_t'], d_lr), end=" ")
e_lr=self.e_lr[task_id] / 10.
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
self.discriminator=self.get_discriminator(task_id)
if task_id > 0:
self.model=self.load_checkpoint(task_id - 1)
else:
self.model=self.network.Net(self.args).to(self.args.device)
# Valid
valid_res=self.eval_(dataset['valid'], task_id)
utils.report_val(valid_res)
# Adapt lr for S and D
if valid_res['loss_tot'] < best_loss:
best_loss=valid_res['loss_tot']
best_model=utils.get_model(self.model)
patience=self.lr_patience
print(' *', end='')
else:
patience-=1
if patience <= 0:
e_lr/=self.lr_factor
print(' lr={:.1e}'.format(e_lr), end='')
if e_lr < self.lr_min:
print()
break
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
if train_res['loss_a'] < best_loss_d:
best_loss_d=train_res['loss_a']
best_model_d=utils.get_model(self.discriminator)
patience_d=self.lr_patience
else:
patience_d-=1
if patience_d <= 0 and dis_lr_update:
d_lr/=self.lr_factor
print(' Dis lr={:.1e}'.format(d_lr))
if d_lr < self.lr_min:
dis_lr_update=False
print("Dis lr reached minimum value")
print()
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print()
# Restore best validation model (early-stopping)
self.model.load_state_dict(copy.deepcopy(best_model))
self.discriminator.load_state_dict(copy.deepcopy(best_model_d))
self.save_all_models(task_id)
def train_epoch(self, train_loader, task_id):
self.model.train()
self.discriminator.train()
for data, target, tt, td in train_loader:
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
# Detaching samples in the batch which do not belong to the current task before feeding them to P
t_current=task_id * torch.ones_like(tt)
body_mask=torch.eq(t_current, tt).cpu().numpy()
# x_task_module=data.to(device=self.device)
x_task_module=data.clone()
for index in range(x.size(0)):
if body_mask[index] == 0:
x_task_module[index]=x_task_module[index].detach()
x_task_module=x_task_module.to(device=self.device)
# Discriminator's real and fake task labels
t_real_D=td.to(self.device)
t_fake_D=torch.zeros_like(t_real_D).to(self.device)
# ================================================================== #
# Train Shared Module #
# ================================================================== #
# training S for s_steps
for s_step in range(self.s_steps):
self.optimizer_S.zero_grad()
self.model.zero_grad()
output=self.model(x, x_task_module, tt, task_id)
task_loss=self.task_loss(output, y)
shared_encoded, task_encoded=self.model.get_encoded_ftrs(x, x_task_module, task_id)
dis_out_gen_training=self.discriminator.forward(shared_encoded, t_real_D, task_id)
adv_loss=self.adversarial_loss_s(dis_out_gen_training, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_encoded, task_encoded)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
total_loss.backward(retain_graph=True)
self.optimizer_S.step()
# ================================================================== #
# Train Discriminator #
# ================================================================== #
# training discriminator for d_steps
for d_step in range(self.d_steps):
self.optimizer_D.zero_grad()
self.discriminator.zero_grad()
# training discriminator on real data
output=self.model(x, x_task_module, tt, task_id)
shared_encoded, task_out=self.model.get_encoded_ftrs(x, x_task_module, task_id)
dis_real_out=self.discriminator.forward(shared_encoded.detach(), t_real_D, task_id)
dis_real_loss=self.adversarial_loss_d(dis_real_out, t_real_D)
if self.args.experiment == 'miniimagenet':
dis_real_loss*=self.adv_loss_reg
dis_real_loss.backward(retain_graph=True)
# training discriminator on fake data
z_fake=torch.as_tensor(np.random.normal(self.mu, self.sigma, (x.size(0), self.latent_dim)),dtype=torch.float32, device=self.device)
dis_fake_out=self.discriminator.forward(z_fake, t_real_D, task_id)
dis_fake_loss=self.adversarial_loss_d(dis_fake_out, t_fake_D)
if self.args.experiment == 'miniimagenet':
dis_fake_loss*=self.adv_loss_reg
dis_fake_loss.backward(retain_graph=True)
self.optimizer_D.step()
return
def eval_(self, data_loader, task_id):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t = 0, 0
num=0
batch=0
self.model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output=self.model(x, x, tt, task_id)
shared_out, task_out=self.model.get_encoded_ftrs(x, x, task_id)
_, pred=output.max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator.forward(shared_out, t_real_D, task_id)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
# Loss values
task_loss=self.task_loss(output, y)
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, task_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss = task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
#
def test(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t=0, 0
num=0
batch=0
model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output=model.forward(x, x, tt, task_id)
shared_out, task_out=model.get_encoded_ftrs(x, x, task_id)
_, pred=output.max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator.forward(shared_out, tt, task_id)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, task_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
# Loss values
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
task_loss=self.task_loss(output, y)
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
def save_all_models(self, task_id):
print("Saving all models for task {} ...".format(task_id+1))
dis=utils.get_model(self.discriminator)
torch.save({'model_state_dict': dis,
}, os.path.join(self.checkpoint, 'discriminator_{}.pth.tar'.format(task_id)))
model=utils.get_model(self.model)
torch.save({'model_state_dict': model,
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
def load_model(self, task_id):
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# # Change the previous shared module with the current one
current_shared_module=deepcopy(self.model.shared.state_dict())
net.shared.load_state_dict(current_shared_module)
net=net.to(self.args.device)
return net
def load_checkpoint(self, task_id):
print("Loading checkpoint for task {} ...".format(task_id))
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
net=net.to(self.args.device)
return net
def loader_size(self, data_loader):
return data_loader.dataset.__len__()
def get_tsne_embeddings_first_ten_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
model.eval()
tag_ = '_diff_{}'.format(self.args.diff)
all_images, all_shared, all_private = [], [], []
# Test final model on first 10 tasks:
writer = SummaryWriter()
for t in range(10):
for itr, (data, _, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
print (torch.stack(all_shared).size())
tag = ['Shared10_{}_{}'.format(tag_,i) for i in range(1,11)]
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#, metadata_header=list(range(1,11)))
tag = ['Private10_{}_{}'.format(tag_, i) for i in range(1, 11)]
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#,metadata_header=list(range(1,11)))
writer.close()
def get_tsne_embeddings_last_three_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
# Test final model on last 3 tasks:
model.eval()
tag = '_diff_{}'.format(self.args.diff)
for t in [17,18,19]:
all_images, all_labels, all_shared, all_private = [], [], [], []
writer = SummaryWriter()
for itr, (data, target, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
# print (shared_out.size())
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
all_labels.append(y)
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Shared_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Private_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.close()
#
class DiffLoss(torch.nn.Module):
# From: Domain Separation Networks (https://arxiv.org/abs/1608.06019)
# Konstantinos Bousmalis, George Trigeorgis, Nathan Silberman, Dilip Krishnan, Dumitru Erhan
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, D1, D2):
D1=D1.view(D1.size(0), -1)
D1_norm=torch.norm(D1, p=2, dim=1, keepdim=True).detach()
D1_norm=D1.div(D1_norm.expand_as(D1) + 1e-6)
D2=D2.view(D2.size(0), -1)
D2_norm=torch.norm(D2, p=2, dim=1, keepdim=True).detach()
D2_norm=D2.div(D2_norm.expand_as(D2) + 1e-6)
# return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
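# --- Illustrative sanity check (added as an example; not part of the original file) ---
# DiffLoss penalizes correlation between the shared and private feature batches:
# if every shared row is orthogonal to every private row the loss is ~0, while
# identical batches give a loss of ~1.
if __name__ == "__main__":
    _shared = torch.tensor([[1.0, 0.0], [1.0, 0.0]])
    _private = torch.tensor([[0.0, 1.0], [0.0, 1.0]])
    _diff = DiffLoss()
    print(_diff(_shared, _private).item())   # ~0.0
    print(_diff(_shared, _shared).item())    # ~1.0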
| Adversarial-Continual-Learning-main | src/acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from copy import deepcopy
import pickle
import time
import uuid
from subprocess import call
########################################################################################################################
def human_format(num):
magnitude=0
while abs(num)>=1000:
magnitude+=1
num/=1000.0
return '%.1f%s'%(num,['','K','M','G','T','P'][magnitude])
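# Illustrative usage (added as an example): human_format compresses large
# parameter counts into K/M/G-style strings.
if __name__ == "__main__":
    print(human_format(1234))      # 1.2K
    print(human_format(56700000))  # 56.7M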
def report_tr(res, e, sbatch, clock0, clock1):
# Training performance
print(
'| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train losses={:.3f} | T: loss={:.3f}, acc={:5.2f}% | D: loss={:.3f}, acc={:5.1f}%, '
'Diff loss:{:.3f} |'.format(
e + 1,
1000 * sbatch * (clock1 - clock0) / res['size'],
1000 * sbatch * (time.time() - clock1) / res['size'], res['loss_tot'],
res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
def report_val(res):
# Validation performance
print(' Valid losses={:.3f} | T: loss={:.6f}, acc={:5.2f}%, | D: loss={:.3f}, acc={:5.2f}%, Diff loss={:.3f} |'.format(
res['loss_tot'], res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
########################################################################################################################
def get_model(model):
return deepcopy(model.state_dict())
########################################################################################################################
def compute_conv_output_size(Lin,kernel_size,stride=1,padding=0,dilation=1):
return int(np.floor((Lin+2*padding-dilation*(kernel_size-1)-1)/float(stride)+1))
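# Illustrative check (added as an example): the standard convolution arithmetic
# above gives, e.g., a 30x30 output for a 32x32 input with a 3x3 kernel and no
# padding, and a 16x16 output with stride 2 and padding 1.
if __name__ == "__main__":
    assert compute_conv_output_size(32, 3) == 30
    assert compute_conv_output_size(32, 3, stride=2, padding=1) == 16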
########################################################################################################################
def save_print_log(taskcla, acc, lss, output_path):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
print ('ACC: {:5.4f}%'.format((np.mean(acc[acc.shape[0]-1,:]))))
print()
print ('BWD Transfer = ')
print ()
print ("Diagonal R_ii")
for i in range(acc.shape[0]):
print('\t',end='')
print('{:5.2f}% '.format(np.diag(acc)[i]), end=',')
print()
print ("Last row")
for i in range(acc.shape[0]):
print('\t', end=',')
print('{:5.2f}% '.format(acc[-1][i]), end=',')
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on our UCB paper (https://openreview.net/pdf?id=HklUCCVKDB)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
with open(os.path.join(output_path, 'logs.p'), 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", os.path.join(output_path, 'logs.p'))
def print_log_acc_bwt(taskcla, acc, lss, output_path, run_id):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
avg_acc = np.mean(acc[acc.shape[0]-1,:])
print ('ACC: {:5.4f}%'.format(avg_acc))
print()
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on UCB paper (https://arxiv.org/abs/1906.02425)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
path = os.path.join(output_path, 'logs_run_id_{}.p'.format(run_id))
with open(path, 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", path)
return avg_acc, gem_bwt
def print_running_acc_bwt(acc, task_num):
print()
acc = acc[:task_num+1,:task_num+1]
avg_acc = np.mean(acc[acc.shape[0] - 1, :])
gem_bwt = sum(acc[-1] - np.diag(acc)) / (len(acc[-1]) - 1)
print('ACC: {:5.4f}% || BWT: {:5.2f}% '.format(avg_acc, gem_bwt))
print()
def make_directories(args):
uid = uuid.uuid4().hex
if args.checkpoint is None:
        os.makedirs('checkpoints', exist_ok=True)
args.checkpoint = os.path.join('./checkpoints/',uid)
os.mkdir(args.checkpoint)
else:
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
args.checkpoint = os.path.join(args.checkpoint, uid)
os.mkdir(args.checkpoint)
def some_sanity_checks(args):
# Making sure the chosen experiment matches with the number of tasks performed in the paper:
datasets_tasks = {}
datasets_tasks['mnist5']=[5]
datasets_tasks['pmnist']=[10,20,30,40]
datasets_tasks['cifar100']=[20]
datasets_tasks['miniimagenet']=[20]
datasets_tasks['multidatasets']=[5]
if not args.ntasks in datasets_tasks[args.experiment]:
raise Exception("Chosen number of tasks ({}) does not match with {} experiment".format(args.ntasks,args.experiment))
    # Making sure the memory-usage flags are consistent:
if args.use_memory == 'yes' and not args.samples > 0:
raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
    if args.use_memory == 'no' and args.samples > 0:
        raise Exception("Flags required to disable memory: --use_memory no --samples 0")
def save_code(args):
cwd = os.getcwd()
des = os.path.join(args.checkpoint, 'code') + '/'
if not os.path.exists(des):
os.mkdir(des)
def get_folder(folder):
return os.path.join(cwd,folder)
folders = [get_folder(item) for item in ['dataloaders', 'networks', 'configs', 'main.py', 'acl.py', 'utils.py']]
for folder in folders:
call('cp -rf {} {}'.format(folder, des),shell=True)
def print_time():
from datetime import datetime
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("Job finished at =", dt_string)
| Adversarial-Continual-Learning-main | src/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import utils
tstart=time.time()
# Arguments
parser = argparse.ArgumentParser(description='Adversarial Continual Learning...')
# Load the config file
parser.add_argument('--config', type=str, default='./configs/config_mnist5.yml')
flags = parser.parse_args()
args = OmegaConf.load(flags.config)
print()
########################################################################################################################
# Args -- Experiment
if args.experiment=='pmnist':
from dataloaders import pmnist as datagenerator
elif args.experiment=='mnist5':
from dataloaders import mnist5 as datagenerator
elif args.experiment=='cifar100':
from dataloaders import cifar100 as datagenerator
elif args.experiment=='miniimagenet':
from dataloaders import miniimagenet as datagenerator
elif args.experiment=='multidatasets':
from dataloaders import mulitidatasets as datagenerator
else:
raise NotImplementedError
from acl import ACL as approach
# Args -- Network
if args.experiment == 'mnist5' or args.experiment == 'pmnist':
from networks import mlp_acl as network
elif args.experiment == 'cifar100' or args.experiment == 'miniimagenet' or args.experiment == 'multidatasets':
from networks import alexnet_acl as network
else:
raise NotImplementedError
########################################################################################################################
def run(args, run_id):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
# Faster run but not deterministic:
# torch.backends.cudnn.benchmark = True
# To get deterministic results that match with paper at cost of lower speed:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Data loader
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
# Model
net = network.Net(args)
net = net.to(args.device)
net.print_model_size()
# print (net)
# Approach
appr=approach(net,args,network=network)
# Loop tasks
acc=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
lss=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Train
appr.train(t,dataset[t])
print('-'*250)
print()
for u in range(t+1):
# Load previous model and replace the shared module with the current one
test_model = appr.load_model(u)
test_res = appr.test(dataset[u]['test'], u, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, dataset[u]['name'],
test_res['loss_t'],
test_res['acc_t']))
acc[t, u] = test_res['acc_t']
lss[t, u] = test_res['loss_t']
# Save
print()
print('Saved accuracies at '+os.path.join(args.checkpoint,args.output))
np.savetxt(os.path.join(args.checkpoint,args.output),acc,'%.6f')
# Extract embeddings to plot in tensorboard for miniimagenet
if args.tsne == 'yes' and args.experiment == 'miniimagenet':
appr.get_tsne_embeddings_first_ten_tasks(dataset, model=appr.load_model(t))
appr.get_tsne_embeddings_last_three_tasks(dataset, model=appr.load_model(t))
avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla, acc, lss, output_path=args.checkpoint, run_id=run_id)
return avg_acc, gem_bwt
#######################################################################################################################
def main(args):
utils.make_directories(args)
utils.some_sanity_checks(args)
utils.save_code(args)
print('=' * 100)
print('Arguments =')
for arg in vars(args):
print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
accuracies, forgetting = [], []
for n in range(args.num_runs):
args.seed = n
args.output = '{}_{}_tasks_seed_{}.txt'.format(args.experiment, args.ntasks, args.seed)
print ("args.output: ", args.output)
print (" >>>> Run #", n)
acc, bwt = run(args, n)
accuracies.append(acc)
forgetting.append(bwt)
print('*' * 100)
print ("Average over {} runs: ".format(args.num_runs))
    print ('AVG ACC: {:5.4f}% \\pm {:5.4f}'.format(np.array(accuracies).mean(), np.array(accuracies).std()))
    print ('AVG BWT: {:5.2f}% \\pm {:5.4f}'.format(np.array(forgetting).mean(), np.array(forgetting).std()))
print ("All Done! ")
print('[Elapsed time = {:.1f} min]'.format((time.time()-tstart)/(60)))
utils.print_time()
#######################################################################################################################
if __name__ == '__main__':
main(args)
| Adversarial-Continual-Learning-main | src/main.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from utils import *
class iCIFAR10(datasets.CIFAR10):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iCIFAR10, self).__init__(root, transform=transform,
target_transform=target_transform, download=True)
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
if self.train:
train_data = []
train_labels = []
train_tt = [] # task module labels
            train_td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
train_data.append(self.data[i])
train_labels.append(self.class_mapping[self.targets[i]])
train_tt.append(task_num)
train_td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
train_data.append(memory[task_id]['x'][i])
train_labels.append(memory[task_id]['y'][i])
train_tt.append(memory[task_id]['tt'][i])
train_td.append(memory[task_id]['td'][i])
self.train_data = np.array(train_data)
self.train_labels = train_labels
self.train_tt = train_tt
self.train_td = train_td
if not self.train:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
test_data = []
test_labels = []
test_tt = [] # task module labels
            test_td = [] # discriminator labels
for i in range(len(self.test_data)):
if self.test_labels[i] in classes:
test_data.append(self.test_data[i])
test_labels.append(self.class_mapping[self.test_labels[i]])
test_tt.append(task_num)
test_td.append(task_num + 1)
self.class_indices[self.class_mapping[self.test_labels[i]]].append(i)
self.test_data = np.array(test_data)
self.test_labels = test_labels
self.test_tt = test_tt
self.test_td = test_td
def __getitem__(self, index):
if self.train:
img, target, tt, td = self.train_data[index], self.train_labels[index], self.train_tt[index], self.train_td[index]
else:
img, target, tt, td = self.test_data[index], self.test_labels[index], self.test_tt[index], self.test_td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None:
img = self.transform(img)
except:
pass
try:
if self.target_transform is not None:
target = self.target_transform(target)
except:
pass
return img, target, tt, td
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
class iCIFAR100(iCIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,32,32]
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True, download=True, transform=self.transformation)
self.test_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'CIFAR100-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
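# --- Illustrative sketch (not part of the original file) ---
# A hypothetical illustration of the class-to-task split used by DatasetGen
# above: the 100 CIFAR-100 labels are permuted with a fixed seed and divided
# into `ntasks` equal groups, one group of classes per continual-learning task.
# The default values of `seed` and `ntasks` below are placeholders, not
# repository settings.
def _example_task_split(seed=0, ntasks=10, num_classes=100):
    np.random.seed(seed)
    groups = np.split(np.random.permutation(num_classes), ntasks)
    return [list(arr) for arr in groups]  # element t holds the class labels of task t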
| Adversarial-Continual-Learning-main | src/dataloaders/cifar100.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import torch.utils.data
from .datasets_utils import *
from utils import *
from torchvision import transforms
mean_datasets = {
'CIFAR10': [x/255 for x in [125.3,123.0,113.9]],
'notMNIST': (0.4254,),
'MNIST': (0.1,) ,
'SVHN':[0.4377,0.4438,0.4728] ,
'FashionMNIST': (0.2190,),
}
std_datasets = {
'CIFAR10': [x/255 for x in [63.0,62.1,66.7]],
'notMNIST': (0.4501,),
'MNIST': (0.2752,),
'SVHN': [0.198,0.201,0.197],
'FashionMNIST': (0.3318,)
}
classes_datasets = {
'CIFAR10': 10,
'notMNIST': 10,
'MNIST': 10,
'SVHN': 10,
'FashionMNIST': 10,
}
lr_datasets = {
'CIFAR10': 0.001,
'notMNIST': 0.01,
'MNIST': 0.01,
'SVHN': 0.001,
'FashionMNIST': 0.01,
}
gray_datasets = {
'CIFAR10': False,
'notMNIST': True,
'MNIST': True,
'SVHN': False,
'FashionMNIST': True,
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_samples = args.samples
self.inputsize = [3,32,32]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
self.datasets_idx = list(np.random.permutation(self.num_tasks))
print('Task order =', [list(classes_datasets.keys())[item] for item in self.datasets_idx])
self.datasets_names = [list(classes_datasets.keys())[item] for item in self.datasets_idx]
self.taskcla = []
self.lrs = []
for i in range(self.num_tasks):
t = self.datasets_idx[i]
self.taskcla.append([i, list(classes_datasets.values())[t]])
self.lrs.append([i, list(lr_datasets.values())[t]])
print('Learning Rates =', self.lrs)
print('taskcla =', self.taskcla)
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.args=args
self.dataloaders, self.memory_set, self.indices = {}, {}, {}
self.memoryloaders = {}
self.saliency_loaders, self.saliency_set = {}, {}
for i in range(self.num_tasks):
self.dataloaders[i] = {}
self.memory_set[i] = {}
self.memoryloaders[i] = {}
self.indices[i] = {}
# self.saliency_set = {}
self.saliency_loaders[i] = {}
self.download = True
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get_dataset(self, dataset_idx, task_num, num_samples_per_class=False, normalize=True):
dataset_name = list(mean_datasets.keys())[dataset_idx]
nspc = num_samples_per_class
if normalize:
transformation = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean_datasets[dataset_name],std_datasets[dataset_name])])
mnist_transformation = transforms.Compose([
transforms.Pad(padding=2, fill=0),
transforms.ToTensor(),
transforms.Normalize(mean_datasets[dataset_name], std_datasets[dataset_name])])
else:
transformation = transforms.Compose([transforms.ToTensor()])
mnist_transformation = transforms.Compose([
transforms.Pad(padding=2, fill=0),
transforms.ToTensor(),
])
# target_transormation = transforms.Compose([transforms.ToTensor()])
target_transormation = None
if dataset_idx == 0:
trainset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform = target_transormation, transform=transformation)
testset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform = target_transormation, transform=transformation)
if dataset_idx == 1:
trainset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
testset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
if dataset_idx == 2:
trainset = MNIST_RGB(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
testset = MNIST_RGB(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
if dataset_idx == 3:
trainset = SVHN_(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=transformation)
testset = SVHN_(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=transformation)
if dataset_idx == 4:
trainset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=True, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
testset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=False, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
return trainset, testset
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
current_dataset_idx = self.datasets_idx[task_id]
dataset_name = list(mean_datasets.keys())[current_dataset_idx]
self.train_set[task_id], self.test_set[task_id] = self.get_dataset(current_dataset_idx,task_id)
self.num_classes = classes_datasets[dataset_name]
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = '{} - {} classes - {} images'.format(dataset_name,
classes_datasets[dataset_name],
len(self.train_set[task_id]))
self.dataloaders[task_id]['classes'] = self.num_classes
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
# This DatasetGen stores no self.task_ids; the class count of the current task is kept in self.taskcla
num_classes = self.taskcla[task_id][1]
num_samples_per_class = self.num_samples // num_classes
mem_class_mapping = {i: i for i in range(num_classes)}
# Looping over each class in the current task
for i in range(num_classes):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
def report_size(self,dataset_name,task_id):
print("Dataset {} size: {} ".format(dataset_name, len(self.train_set[task_id])))
| Adversarial-Continual-Learning-main | src/dataloaders/mulitidatasets.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/pytorch/vision/blob/8635be94d1216f10fb8302da89233bd86445e449/torchvision/datasets/utils.py
import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import numpy as np
import torch
import codecs
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
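# --- Illustrative sketch (not part of the original file) ---
# Hypothetical usage of the two helpers above: hash a local file in 1 MiB
# chunks and compare it to an expected checksum before trusting it. The path
# and checksum strings are placeholders only.
def _example_verify(fpath='downloads/archive.tar.gz',
                    expected_md5='0123456789abcdef0123456789abcdef'):
    if check_integrity(fpath, expected_md5):
        print('Checksum verified: ' + fpath)
    else:
        print('Missing or corrupted file: ' + fpath)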
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
else:
raise e
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
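# --- Illustrative sketch (not part of the original file) ---
# Hypothetical usage of download_and_extract_archive above, mirroring how the
# CIFAR loaders fetch their archives elsewhere in this repository. The URL and
# MD5 are the public CIFAR-100 values used by those loaders; the destination
# directory is a placeholder.
def _example_fetch_cifar100(root='../data'):
    download_and_extract_archive(
        url="https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz",
        download_root=root,
        filename="cifar-100-python.tar.gz",
        md5='eb9058c3a382ffc7106e4002c42a8d85')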
def iterable_to_str(iterable):
return "'" + "', '".join([str(item) for item in iterable]) + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
if not isinstance(path, torch._six.string_classes):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
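# --- Illustrative sketch (not part of the original file) ---
# A hypothetical round trip through the IDX parser above: the 4-byte magic
# encodes the element type (high bytes) and the number of dimensions (low
# byte), followed by one big-endian int32 per dimension and the raw payload.
def _example_parse_idx():
    import io
    header = b'\x00\x00\x08\x01'   # type 8 (uint8), 1 dimension
    dims = b'\x00\x00\x00\x03'     # single dimension of size 3
    payload = bytes([7, 8, 9])
    buf = io.BytesIO(header + dims + payload)
    return read_sn3_pascalvincent_tensor(buf, strict=True)  # tensor([7, 8, 9], dtype=torch.uint8)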
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 3)
return x | Adversarial-Continual-Learning-main | src/dataloaders/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import transforms
from utils import *
class MiniImageNet(torch.utils.data.Dataset):
def __init__(self, root, train):
super(MiniImageNet, self).__init__()
if train:
self.name='train'
else:
self.name='test'
root = os.path.join(root, 'miniimagenet')
with open(os.path.join(root,'{}.pkl'.format(self.name)), 'rb') as f:
data_dict = pickle.load(f)
self.data = data_dict['images']
self.labels = data_dict['labels']
def __len__(self):
return len(self.data)
def __getitem__(self, i):
img, label = self.data[i], self.labels[i]
return img, label
class iMiniImageNet(MiniImageNet):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None):
super(iMiniImageNet, self).__init__(root=root, train=train)
self.transform = transform
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
labels = []
tt = [] # task module labels
td = [] # discriminator labels
for i in range(len(self.data)):
if self.labels[i] in classes:
data.append(self.data[i])
labels.append(self.class_mapping[self.labels[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.labels[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
labels.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = np.array(data)
self.labels = labels
self.tt = tt
self.td = td
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.labels[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if not torch.is_tensor(img):
img = Image.fromarray(img)
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
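# --- Illustrative sketch (not part of the original file) ---
# Hypothetical layout of the episodic memory that iMiniImageNet merges into a
# new task above: one dict per previous task holding raw images plus the
# remapped label (y), task-module label (tt) and discriminator label (td).
# The dummy values below are for illustration only.
def _example_memory(num_prev_tasks=2):
    memory = {}
    for t in range(num_prev_tasks):
        memory[t] = {
            'x': [np.zeros((84, 84, 3), dtype=np.uint8)],  # one stored image
            'y': [0],                                       # remapped class label
            'tt': [t],                                      # task-module label
            'td': [t + 1],                                  # discriminator label
        }
    return memory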
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.use_memory = args.use_memory
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,84,84]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.transformation = transforms.Compose([
transforms.Resize((84,84)),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id],
memory_classes=memory_classes, memory=memory,
task_num=task_id, train=True, transform=self.transformation)
self.test_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'iMiniImageNet-{}-{}'.format(task_id,self.task_ids[task_id])
self.dataloaders[task_id]['tsne'] = torch.utils.data.DataLoader(self.test_set[task_id],
batch_size=len(test_loader.dataset),
num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
print ("Task ID: ", task_id)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
for i in range(len(self.task_ids[task_id])):
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class] # randomly sample some data
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x']))) | Adversarial-Continual-Learning-main | src/dataloaders/miniimagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import torch
import numpy as np
import os.path
import sys
import torch.utils.data as data
from torchvision import datasets, transforms
class iMNIST(datasets.MNIST):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iMNIST, self).__init__(root, task_num, transform=transform,
target_transform=target_transform, download=download)
self.train = train # training set or test set
self.root = root
self.target_transform=target_transform
self.transform=transform
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' + ' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
targets = []
tt = [] # task module labels
td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
data.append(self.data[i])
targets.append(self.class_mapping[self.targets[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if self.train:
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
targets.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = data.copy()
self.targets = targets.copy()
self.tt = tt.copy()
self.td = td.copy()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img.numpy(), mode='L')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 10
self.num_samples = args.samples
self.inputsize = [1,28,28]
mean = (0.1307,)
std = (0.3081,)
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
self.task_ids = [[0,1], [2,3], [4,5], [6,7], [8,9]]
self.train_set = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMNIST(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True,
download=True, transform=self.transformation)
self.test_set[task_id] = iMNIST(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, drop_last=True,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),shuffle=True,
num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, drop_last=True,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = '5Split-MNIST-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
dataset = iMNIST(root=self.root, classes=self.task_ids[task_id][i], memory_classes=None, memory=None,
task_num=task_id, train=True, download=True, transform=self.transformation)
data_loader = torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3]) | Adversarial-Continual-Learning-main | src/dataloaders/mnist5.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sys, os
import numpy as np
from PIL import Image
import torch.utils.data as data
from torchvision import datasets, transforms
from sklearn.utils import shuffle
from utils import *
class PermutedMNIST(datasets.MNIST):
def __init__(self, root, task_num, train=True, permute_idx=None, transform=None):
super(PermutedMNIST, self).__init__(root, train, download=True)
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data = torch.stack([img.float().view(-1)[permute_idx] for img in self.data])
self.tl = (task_num) * torch.ones(len(self.data),dtype=torch.long)
self.td = (task_num+1) * torch.ones(len(self.data),dtype=torch.long)
def __getitem__(self, index):
img, target, tl, td = self.data[index], self.targets[index], self.tl[index], self.td[index]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
print ("We are transforming")
target = self.target_transform(target)
return img, target, tl, td
def __len__(self):
return self.data.size(0)
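# --- Illustrative sketch (not part of the original file) ---
# A hypothetical illustration of the per-task pixel permutation used above:
# every task reuses the same MNIST images, but the 784 flattened pixel
# positions are shuffled with a task-specific random state.
def _example_permutation(seed=0, task_id=0):
    idx = shuffle(list(range(28 * 28)), random_state=seed * 100 + task_id)
    img = torch.arange(28 * 28, dtype=torch.float32)  # stand-in for one flattened image
    return img[idx]                                   # permuted view, as built in __init__ above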
class DatasetGen(object):
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.num_samples = args.samples
self.num_tasks = args.ntasks
self.root = args.data_dir
self.use_memory = args.use_memory
self.inputsize = [1, 28, 28]
mean = (0.1307,)
std = (0.3081,)
self.transformation = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean, std)])
self.taskcla = [[t, 10] for t in range(self.num_tasks)]
self.train_set, self.test_set = {}, {}
self.indices = {}
self.dataloaders = {}
self.idx={}
self.get_idx()
self.pin_memory = True
self.num_workers = args.workers
self.task_memory = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
self.train_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
if self.use_memory == 'yes' and self.num_samples > 0:
indices=torch.randperm(len(self.train_set[task_id]))[:self.num_samples]
rand_subset=torch.utils.data.Subset(self.train_set[task_id], indices)
self.task_memory.append(rand_subset)
else:
if self.use_memory == 'yes' and self.num_samples > 0:
current_dataset = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
d = []
d.append(current_dataset)
for m in self.task_memory:
d.append(m)
self.train_set[task_id] = torch.utils.data.ConcatDataset(d)
indices=torch.randperm(len(current_dataset))[:self.num_samples]
rand_subset=torch.utils.data.Subset(current_dataset, indices)
self.task_memory.append(rand_subset)
else:
self.train_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
self.test_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=False,
permute_idx=self.idx[task_id], transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id],
[len(self.train_set[task_id]) - split, split])
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'pmnist-{}'.format(task_id+1)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
return self.dataloaders
def get_idx(self):
for i in range(len(self.taskcla)):
idx = list(range(self.inputsize[1] * self.inputsize[2]))
self.idx[i] = shuffle(idx, random_state=self.seed * 100 + i)
| Adversarial-Continual-Learning-main | src/dataloaders/pmnist.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os.path
import sys
import warnings
import urllib.request
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from .utils import *
# from scipy.imageio import imread
import pandas as pd
import os
import torch
from PIL import Image
import scipy.io as sio
from collections import defaultdict
from itertools import chain
from collections import OrderedDict
class CIFAR10_(datasets.CIFAR10):
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
num_classes = 10
def __init__(self, root, task_num, num_samples_per_class, train, transform, target_transform, download=True):
# root, task_num, train, transform = None, download = False):
super(CIFAR10_, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
if not num_samples_per_class:
self.data = []
self.targets = []
# now load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
else:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class examples per class, we build it here; it can later be concatenated with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
self._load_meta()
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
# if self.train:
return len(self.data)
# else:
# return len(self.test_data)
def report_size(self):
print("CIFAR10 size at train={} time: {} ".format(self.train,self.__len__()))
def _load_meta(self):
path = os.path.join(self.root, self.base_folder, self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError('Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
if sys.version_info[0] == 2:
data = pickle.load(infile)
else:
data = pickle.load(infile, encoding='latin1')
self.classes = data[self.meta['key']]
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
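# --- Illustrative sketch (not part of the original file) ---
# A hypothetical illustration of the few-shot branch in CIFAR10_.__init__
# above: for each label, the indices carrying that label are gathered and only
# num_samples_per_class of them are kept after a random permutation. Dummy
# labels stand in for the real CIFAR targets.
def _example_subsample(targets=(0, 1, 0, 1, 0, 1, 0, 1), num_samples_per_class=2):
    kept = []
    for l in set(targets):
        idx = np.where(np.array(targets) == l)[0]
        kept.extend(np.random.permutation(idx)[:num_samples_per_class].tolist())
    return sorted(kept)  # indices of the retained few-shot subset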
class CIFAR100_(CIFAR10_):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
num_classes = 100
class SVHN_(torch.utils.data.Dataset):
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]}
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=True):
self.root = os.path.expanduser(root)
# root, task_num, train, transform = None, download = False):
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if self.train:
split="train"
else:
split="test"
self.num_classes = 10
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat['X']
# loading from the .mat file gives an np array of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.targets = loaded_mat['y'].astype(np.int64).squeeze()
self.data = np.transpose(self.data, (3, 2, 0, 1))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes+1):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class examples per class, we build it here; it can later be concatenated with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = np.array(sum(y,[])).astype(np.int64)
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.targets, self.targets == 10, 0)
# print ("svhn: ", self.data.shape)
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self):
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
class MNIST_RGB(datasets.MNIST):
def __init__(self, root, task_num, num_samples_per_class, train=True, transform=None, target_transform=None, download=False):
super(MNIST_RGB, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
self.train = train # training set or test set
self.target_transform=target_transform
self.transform=transform
self.num_classes=10
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
# self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# y_with_label_l = [l]*len(x_with_label_l)
# If we need a subset of the dataset with num_samples_per_class examples per class, we build it here; it can later be concatenated with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img, mode='L').convert('RGB')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class FashionMNIST_(MNIST_RGB):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class notMNIST_(torch.utils.data.Dataset):
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform=target_transform
self.train = train
self.url = "https://github.com/facebookresearch/Adversarial-Continual-Learning/raw/master/data/notMNIST.zip"
self.filename = 'notMNIST.zip'
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
download_url(self.url, root, filename=self.filename)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
if self.train:
fpath = os.path.join(root, 'notMNIST', 'Train')
else:
fpath = os.path.join(root, 'notMNIST', 'Test')
X, Y = [], []
folders = os.listdir(fpath)
for folder in folders:
folder_path = os.path.join(fpath, folder)
for ims in os.listdir(folder_path):
try:
img_path = os.path.join(folder_path, ims)
X.append(np.array(Image.open(img_path).convert('RGB')))
Y.append(ord(folder) - 65) # Folders are A-J so labels will be 0-9
except:
print("File {}/{} is broken".format(folder, ims))
self.data = np.array(X)
self.targets = Y
self.num_classes = len(set(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# If we need a subset of the dataset with num_samples_per_class examples per class, we build it here; it can later be concatenated with a complete dataset
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])  # keep targets aligned with the subsampled data
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
img = Image.fromarray(img)#.convert('RGB')
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
def download(self):
"""Download the notMNIST data if it doesn't exist in processed_folder already."""
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
| Adversarial-Continual-Learning-main | src/dataloaders/datasets_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
if args.experiment == 'cifar100':
hiddens = [64, 128, 256, 1024, 1024, 512]
elif args.experiment == 'miniimagenet':
hiddens = [64, 128, 256, 512, 512, 512]
# ----------------------------------
elif args.experiment == 'multidatasets':
hiddens = [64, 128, 256, 1024, 1024, 512]
else:
raise NotImplementedError
self.conv1=torch.nn.Conv2d(self.ncha,hiddens[0],kernel_size=size//8)
s=utils.compute_conv_output_size(size,size//8)
s=s//2
self.conv2=torch.nn.Conv2d(hiddens[0],hiddens[1],kernel_size=size//10)
s=utils.compute_conv_output_size(s,size//10)
s=s//2
self.conv3=torch.nn.Conv2d(hiddens[1],hiddens[2],kernel_size=2)
s=utils.compute_conv_output_size(s,2)
s=s//2
self.maxpool=torch.nn.MaxPool2d(2)
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(hiddens[2]*s*s,hiddens[3])
self.fc2=torch.nn.Linear(hiddens[3],hiddens[4])
self.fc3=torch.nn.Linear(hiddens[4],hiddens[5])
self.fc4=torch.nn.Linear(hiddens[5], self.latent_dim)
def forward(self, x_s):
x_s = x_s.view_as(x_s)
h = self.maxpool(self.drop1(self.relu(self.conv1(x_s))))
h = self.maxpool(self.drop1(self.relu(self.conv2(h))))
h = self.maxpool(self.drop2(self.relu(self.conv3(h))))
h = h.view(x_s.size(0), -1)
h = self.drop2(self.relu(self.fc1(h)))
h = self.drop2(self.relu(self.fc2(h)))
h = self.drop2(self.relu(self.fc3(h)))
h = self.drop2(self.relu(self.fc4(h)))
return h
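# Editorial sketch (not in the original file): the fc1 input size hiddens[2]*s*s above follows from
# standard no-padding, stride-1 convolutions followed by 2x2 max-pooling, which is what
# utils.compute_conv_output_size is assumed to implement. For the cifar100 setting (size = 32):
if __name__ == '__main__':
    size = 32
    s = (size - size // 8 + 1) // 2       # conv1 (kernel size//8 = 4) + maxpool -> 14
    s = (s - size // 10 + 1) // 2         # conv2 (kernel size//10 = 3) + maxpool -> 6
    s = (s - 2 + 1) // 2                  # conv3 (kernel 2) + maxpool -> 2
    print(256 * s * s)                    # 1024, the in_features of fc1 for cifar100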
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'cifar100':
hiddens=[32,32]
flatten=1152
elif args.experiment == 'miniimagenet':
# hiddens=[8,8]
# flatten=1800
hiddens=[16,16]
flatten=3600
elif args.experiment == 'multidatasets':
hiddens=[32,32]
flatten=1152
else:
raise NotImplementedError
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.conv = torch.nn.Sequential()
self.conv.add_module('conv1',torch.nn.Conv2d(self.ncha, hiddens[0], kernel_size=self.size // 8))
self.conv.add_module('relu1', torch.nn.ReLU(inplace=True))
self.conv.add_module('drop1', torch.nn.Dropout(0.2))
self.conv.add_module('maxpool1', torch.nn.MaxPool2d(2))
self.conv.add_module('conv2', torch.nn.Conv2d(hiddens[0], hiddens[1], kernel_size=self.size // 10))
self.conv.add_module('relu2', torch.nn.ReLU(inplace=True))
self.conv.add_module('dropout2', torch.nn.Dropout(0.5))
self.conv.add_module('maxpool2', torch.nn.MaxPool2d(2))
self.task_out.append(self.conv)
self.linear = torch.nn.Sequential()
self.linear.add_module('linear1', torch.nn.Linear(flatten,self.latent_dim))
self.linear.add_module('relu3', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x, task_id):
x = x.view_as(x)
out = self.task_out[2*task_id].forward(x)
out = out.view(out.size(0),-1)
out = self.task_out[2*task_id+1].forward(out)
return out
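# Editorial note (not in the original source): task_out above interleaves per-task modules as
# [conv_0, linear_0, conv_1, linear_1, ...], which is why forward() indexes the conv stack with
# task_out[2*task_id] and the per-task linear projection with task_out[2*task_id + 1].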
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.samples = args.samples
self.image_size = self.ncha*size*size
self.args=args
self.hidden1 = args.head_units
self.hidden2 = args.head_units//2
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self, x_s, x_p, tt, task_id):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
x_s = self.shared(x_s)
x_p = self.private(x_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
if self.args.experiment == 'multidatasets':
# if no memory is used this is faster:
y=[]
for i,_ in self.taskcla:
y.append(self.head[i](x))
return y[task_id]
else:
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples,
self.pretty_print(self.num_tasks*4*self.samples*self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(4*(count_S + count_P + count_H)+self.num_tasks*4*self.samples*self.image_size))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
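# Editorial note (not in the original source): the size figures printed above assume 4 bytes per
# value (float32), i.e. '%sB' % pretty_print(4 * count); a model with 1.2M parameters is reported
# as roughly 4.8MB, and the replay memory estimate is ntasks * samples * image_size * 4 bytes.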
| Adversarial-Continual-Learning-main | src/networks/alexnet_acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.nhid = args.units
self.device = args.device
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.linear = torch.nn.Sequential()
self.linear.add_module('linear', torch.nn.Linear(self.ncha*self.size*self.size, self.latent_dim))
self.linear.add_module('relu', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x_p, task_id):
x_p = x_p.view(x_p.size(0), -1)
return self.task_out[task_id].forward(x_p)
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.nhid = args.units
self.nlayers = args.nlayers
self.relu=torch.nn.ReLU()
self.drop=torch.nn.Dropout(0.2)
self.fc1=torch.nn.Linear(ncha*self.size*self.size, self.nhid)
if self.nlayers == 3:
self.fc2 = torch.nn.Linear(self.nhid, self.nhid)
self.fc3=torch.nn.Linear(self.nhid,self.latent_dim)
else:
self.fc2 = torch.nn.Linear(self.nhid,self.latent_dim)
def forward(self, x_s):
h = x_s.view(x_s.size(0), -1)
h = self.drop(self.relu(self.fc1(h)))
h = self.drop(self.relu(self.fc2(h)))
if self.nlayers == 3:
h = self.drop(self.relu(self.fc3(h)))
return h
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'mnist5':
self.hidden1 = 28
self.hidden2 = 14
elif args.experiment == 'pmnist':
self.hidden1 = 28
self.hidden2 = 28
self.samples = args.samples
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2 * self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self,x_s, x_p, tt, task_id):
h_s = x_s.view(x_s.size(0), -1)
h_p = x_p.view(x_p.size(0), -1)
x_s = self.shared(h_s)
x_p = self.private(h_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Total architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
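# Editorial usage sketch (not part of the original file): builds the MLP-based Net with a
# hypothetical argparse.Namespace carrying only the fields read above; all values are illustrative.
if __name__ == '__main__':
    import argparse
    import torch
    args = argparse.Namespace(inputsize=(1, 28, 28), taskcla=[(0, 2), (1, 2)], latent_dim=128,
                              ntasks=2, units=175, nlayers=2, experiment='pmnist',
                              samples=0, device='cpu')
    net = Net(args)
    x = torch.randn(4, 1, 28, 28)
    tt = [0, 0, 0, 0]                 # per-sample task ids used to pick a head
    out = net(x, x, tt, task_id=0)
    print(out.shape)                  # expected: torch.Size([4, 2])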
| Adversarial-Continual-Learning-main | src/networks/mlp_acl.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Discriminator(torch.nn.Module):
def __init__(self,args,task_id):
super(Discriminator, self).__init__()
self.num_tasks=args.ntasks
self.units=args.units
self.latent_dim=args.latent_dim
if args.diff == 'yes':
self.dis = torch.nn.Sequential(
GradientReversal(args.lam),
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
else:
self.dis = torch.nn.Sequential(
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
def forward(self, z, labels, task_id):
return self.dis(z)
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def get_size(self):
count=sum(p.numel() for p in self.dis.parameters() if p.requires_grad)
print('Num parameters in D = %s ' % (self.pretty_print(count)))
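# Editorial note (not in the original source): the discriminator outputs task_id + 2 logits, i.e.
# one class per task seen so far plus one extra class. With args.diff == 'yes' the GradientReversal
# layer defined below is prepended, so gradients flowing back through the discriminator are flipped
# and the upstream shared encoder is trained adversarially against it.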
class GradientReversalFunction(torch.autograd.Function):
"""
From:
https://github.com/jvanvugt/pytorch-domain-adaptation/blob/cb65581f20b71ff9883dd2435b2275a1fd4b90df/utils.py#L26
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
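# Editorial sketch (not in the original file): a quick numerical check of the reversal. The forward
# pass is the identity; the backward pass multiplies incoming gradients by -lambda.
if __name__ == '__main__':
    import torch
    _x = torch.ones(3, requires_grad=True)
    _y = GradientReversalFunction.apply(_x, 2.0)
    _y.sum().backward()
    print(_x.grad)   # expected: tensor([-2., -2., -2.])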
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_)
| Adversarial-Continual-Learning-main | src/networks/discriminator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper script for launching a job on the fair cluster.
Sample usage:
python cluster_run.py --name=trial --setup='/path/to/setup.sh' --cmd='job_command'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
from absl import app
from absl import flags
import os
import sys
import random
import string
import datetime
import re
opts = flags.FLAGS
flags.DEFINE_integer('nodes', 1, 'Number of nodes per task')
flags.DEFINE_integer('ntp', 1, 'Number of tasks per node')
flags.DEFINE_integer('ncpus', 40, 'Number of cpu cores per task')
flags.DEFINE_integer('ngpus', 1, 'Number of gpus per task')
flags.DEFINE_string('name', '', 'Job name')
flags.DEFINE_enum('partition', 'learnfair', ['dev', 'priority','uninterrupted','learnfair'], 'Cluster partition')
flags.DEFINE_string('comment', 'for ICML deadline in 2020.', 'Comment')
flags.DEFINE_string('time', '72:00:00', 'Time for which the job should run')
flags.DEFINE_string('setup', '/private/home/tanmayshankar/Research/Code/Setup.bash', 'Setup script that will be run before the command')
# flags.DEFINE_string('workdir', os.getcwd(), 'Job command')
flags.DEFINE_string('workdir', '/private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments', 'Directory to run job from')
# flags.DEFINE_string('workdir', '/private/home/tanmayshankar/Research/Code/SkillsfromDemonstrations/Experiments/BidirectionalInfoModel/', 'Job command')
flags.DEFINE_string('cmd', 'echo $PWD', 'Job command')
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def main(_):
job_folder = '/checkpoint/tanmayshankar/jobs/' + datetime.date.today().strftime('%y_%m_%d')
mkdir(job_folder)
if len(opts.name) == 0:
# read name from command
opts.name = re.search(r'--name=\w+', opts.cmd).group(0)[7:]
print(opts.name)
slurm_cmd = '#!/bin/bash\n\n'
slurm_cmd += '#SBATCH --job-name={}\n'.format(opts.name)
slurm_cmd += '#SBATCH --output={}/{}-%j.out\n'.format(job_folder, opts.name)
slurm_cmd += '#SBATCH --error={}/{}-%j.err\n'.format(job_folder, opts.name)
# slurm_cmd += '#SBATCH --exclude=learnfair2038'
slurm_cmd += '\n'
slurm_cmd += '#SBATCH --partition={}\n'.format(opts.partition)
if len(opts.comment) > 0:
slurm_cmd += '#SBATCH --comment="{}"\n'.format(opts.comment)
slurm_cmd += '\n'
slurm_cmd += '#SBATCH --nodes={}\n'.format(opts.nodes)
slurm_cmd += '#SBATCH --ntasks-per-node={}\n'.format(opts.ntp)
if opts.ngpus > 0:
slurm_cmd += '#SBATCH --gres=gpu:{}\n'.format(opts.ngpus)
slurm_cmd += '#SBATCH --cpus-per-task={}\n'.format(opts.ncpus)
slurm_cmd += '#SBATCH --time={}\n'.format(opts.time)
slurm_cmd += '\n'
slurm_cmd += 'source {}\n'.format(opts.setup)
slurm_cmd += 'cd {} \n\n'.format(opts.workdir)
slurm_cmd += '{}\n'.format(opts.cmd)
job_fname = '{}/{}.sh'.format(job_folder, ''.join(random.choices(string.ascii_letters, k=8)))
with open(job_fname, 'w') as f:
f.write(slurm_cmd)
#print('sbatch {}'.format(job_fname))
os.system('sbatch {}'.format(job_fname))
if __name__ == '__main__':
app.run(main)
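# Editorial note (not in the original source): for a call such as
#   python cluster_run.py --name=trial --cmd='python train.py --name=trial'
# with the default flags above, the job script written to the dated job folder looks roughly like:
#
#   #!/bin/bash
#
#   #SBATCH --job-name=trial
#   #SBATCH --output=/checkpoint/tanmayshankar/jobs/<yy_mm_dd>/trial-%j.out
#   #SBATCH --error=/checkpoint/tanmayshankar/jobs/<yy_mm_dd>/trial-%j.err
#
#   #SBATCH --partition=learnfair
#   #SBATCH --comment="for ICML deadline in 2020."
#
#   #SBATCH --nodes=1
#   #SBATCH --ntasks-per-node=1
#   #SBATCH --gres=gpu:1
#   #SBATCH --cpus-per-task=40
#   #SBATCH --time=72:00:00
#
#   source /private/home/tanmayshankar/Research/Code/Setup.bash
#   cd /private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments
#
#   python train.py --name=trial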
| CausalSkillLearning-main | Experiments/cluster_run.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
# Check if CUDA is available, set device to GPU if it is, otherwise use CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class PolicyNetwork_BaseClass(torch.nn.Module):
def __init__(self):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(PolicyNetwork_BaseClass, self).__init__()
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample().squeeze(0)
return sample_action
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
return action_probabilities.argmax()
# def select_epsilon_greedy_action(self, action_probabilities):
# epsilon = 0.1
# if np.random.random()<epsilon:
# return self.sample_action(action_probabilities)
# else:
# return self.select_greedy_action(action_probabilities)
def select_epsilon_greedy_action(self, action_probabilities, epsilon=0.1):
epsilon = epsilon
whether_greedy = torch.rand(action_probabilities.shape[0]).to(device)
sample_actions = torch.where(whether_greedy<epsilon, self.sample_action(action_probabilities), self.select_greedy_action(action_probabilities))
return sample_actions
class PolicyNetwork(PolicyNetwork_BaseClass):
# REMEMBER, in the Bi-directional model, this is going to be evaluated for log-probabilities alone.
# Forward pass set up for evaluating this already.
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, input_size, hidden_size, output_size, number_subpolicies, number_layers=4, batch_size=1, whether_latentb_input=False):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(PolicyNetwork, self).__init__()
if whether_latentb_input:
self.input_size = input_size+number_subpolicies+1
else:
self.input_size = input_size+number_subpolicies
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = number_layers
self.batch_size = batch_size
# Create LSTM Network.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.softmax_layer = torch.nn.Softmax(dim=1)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
def forward(self, input, hidden=None, return_log_probabilities=False):
# The argument hidden_input here is the initial hidden state we want to feed to the LSTM.
# Assume inputs is the trajectory sequence.
# Input Format must be: Sequence_Length x Batch_Size x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
outputs, hidden = self.lstm(format_input)
# Takes softmax of last output.
if return_log_probabilities:
# Computes log probabilities, needed for loss function and log likelihood.
preprobability_outputs = self.output_layer(outputs)
log_probabilities = self.batch_logsoftmax_layer(preprobability_outputs).squeeze(1)
probabilities = self.batch_softmax_layer(preprobability_outputs).squeeze(1)
return outputs, hidden, log_probabilities, probabilities
else:
# Compute action probabilities for sampling.
softmax_output = self.softmax_layer(self.output_layer(outputs[-1]))
return outputs, hidden, softmax_output
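# Editorial usage sketch (not part of the original file): the network expects a trajectory shaped
# (sequence_length, 1, state_dim + number_subpolicies); all sizes below are illustrative.
if __name__ == '__main__':
    import torch
    policy = PolicyNetwork(input_size=16, hidden_size=32, output_size=4,
                           number_subpolicies=4, number_layers=2)
    traj = torch.randn(10, 1, 16 + 4)
    outputs, hidden, logp, p = policy(traj, return_log_probabilities=True)
    print(logp.shape, p.shape)   # expected: (10, 4) each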
class ContinuousPolicyNetwork(PolicyNetwork_BaseClass):
# REMEMBER, in the Bi-directional model, this is going to be evaluated for log-probabilities alone.
# Forward pass set up for evaluating this already.
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
# def __init__(self, input_size, hidden_size, output_size, number_subpolicies, number_layers=4, batch_size=1):
# def __init__(self, input_size, hidden_size, output_size, z_space_size, number_layers=4, batch_size=1, whether_latentb_input=False):
def __init__(self, input_size, hidden_size, output_size, args, number_layers=4, whether_latentb_input=False, zero_z_dim=False, small_init=False):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(ContinuousPolicyNetwork, self).__init__()
self.hidden_size = hidden_size
# The output size here must be mean+variance for each dimension.
# This is output_size*2.
self.args = args
self.output_size = output_size
self.num_layers = number_layers
self.batch_size = self.args.batch_size
if whether_latentb_input:
self.input_size = input_size+self.args.z_dimensions+1
else:
if zero_z_dim:
self.input_size = input_size
else:
self.input_size = input_size+self.args.z_dimensions
# Create LSTM Network.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
# # Try initializing the network to something, so that we can escape the stupid constant output business.
if small_init:
for name, param in self.mean_output_layer.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.0)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=0.0001)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
def forward(self, input, action_sequence, epsilon=0.001):
# Input is the trajectory sequence of shape: Sequence_Length x 1 x Input_Size.
# Here, we also need the continuous actions as input to evaluate their logprobability / probability.
# format_input = torch.tensor(input).view(input.shape[0], self.batch_size, self.input_size).float().to(device)
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
format_action_seq = torch.from_numpy(action_sequence).to(device).float().view(action_sequence.shape[0],1,self.output_size)
lstm_outputs, hidden = self.lstm(format_input)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
# variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias) + epsilon
# Remember, because of PyTorch's dynamic construction, this distribution can have its own batch size.
# It doesn't matter if the batch size changes over different forward passes of the LSTM, because we're only going
# to evaluate this distribution (instance)'s log probability with the same sequence length.
dist = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
log_probabilities = dist.log_prob(format_action_seq)
# log_probabilities = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs)).log_prob(format_action_seq)
entropy = dist.entropy()
if self.args.debug:
print("Embedding in the policy network.")
embed()
return log_probabilities, entropy
def get_actions(self, input, greedy=False):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
lstm_outputs, hidden = self.lstm(format_input)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
if greedy:
return mean_outputs
else:
# Remember, because of PyTorch's dynamic construction, this distribution can have its own batch size.
# It doesn't matter if the batch size changes over different forward passes of the LSTM, because we're only going
# to evaluate this distribution (instance)'s log probability with the same sequence length.
dist = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
return dist.sample()
def reparameterized_get_actions(self, input, greedy=False, action_epsilon=0.):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
lstm_outputs, hidden = self.lstm(format_input)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
noise = torch.randn_like(variance_outputs)
if greedy:
action = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
action = mean_outputs + variance_outputs * noise
return action
def incremental_reparam_get_actions(self, input, greedy=False, action_epsilon=0., hidden=None):
# Input should be a single timestep input here.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
# Instead of feeding in entire input sequence, we are feeding in current timestep input and previous hidden state.
lstm_outputs, hidden = self.lstm(format_input, hidden)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(lstm_outputs))
else:
mean_outputs = self.mean_output_layer(lstm_outputs)
variance_outputs = (self.variance_activation_layer(self.variances_output_layer(lstm_outputs))+self.variance_activation_bias)
noise = torch.randn_like(variance_outputs)
if greedy:
action = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
action = mean_outputs + variance_outputs * noise
return action, hidden
def get_regularization_kl(self, input_z1, input_z2):
# Input is the trajectory sequence of shape: Sequence_Length x 1 x Input_Size.
# Here, we also need the continuous actions as input to evaluate their logprobability / probability.
format_input_z1 = input_z1.view(input_z1.shape[0], self.batch_size, self.input_size)
format_input_z2 = input_z2.view(input_z2.shape[0], self.batch_size, self.input_size)
hidden = None
# format_action_seq = torch.from_numpy(action_sequence).to(device).float().view(action_sequence.shape[0],1,self.output_size)
lstm_outputs_z1, _ = self.lstm(format_input_z1)
# Reset hidden?
lstm_outputs_z2, _ = self.lstm(format_input_z2)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs_z1 = self.activation_layer(self.mean_output_layer(lstm_outputs_z1))
mean_outputs_z2 = self.activation_layer(self.mean_output_layer(lstm_outputs_z2))
else:
mean_outputs_z1 = self.mean_output_layer(lstm_outputs_z1)
mean_outputs_z2 = self.mean_output_layer(lstm_outputs_z2)
variance_outputs_z1 = self.variance_activation_layer(self.variances_output_layer(lstm_outputs_z1))+self.variance_activation_bias
variance_outputs_z2 = self.variance_activation_layer(self.variances_output_layer(lstm_outputs_z2))+self.variance_activation_bias
dist_z1 = torch.distributions.MultivariateNormal(mean_outputs_z1, torch.diag_embed(variance_outputs_z1))
dist_z2 = torch.distributions.MultivariateNormal(mean_outputs_z2, torch.diag_embed(variance_outputs_z2))
kl_divergence = torch.distributions.kl_divergence(dist_z1, dist_z2)
return kl_divergence
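# Editorial sketch (not in the original file): the reparameterized variants above construct the
# action as mean + variance_outputs * noise, which keeps the action differentiable w.r.t. the
# network outputs, whereas dist.sample() would not be. (Worth noting: the distribution-based paths
# treat torch.diag_embed(variance_outputs) as a covariance, while the reparameterized paths use
# variance_outputs directly as a scale on the noise.) A minimal illustration with stand-in tensors:
if __name__ == '__main__':
    import torch
    mu = torch.zeros(3, requires_grad=True)
    sigma = torch.ones(3, requires_grad=True)
    eps = torch.randn(3)
    action = mu + sigma * eps      # reparameterized construction: gradients reach mu and sigma
    action.sum().backward()
    print(mu.grad, sigma.grad)     # both populated; a .sample()d action would carry no gradient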
class LatentPolicyNetwork(PolicyNetwork_BaseClass):
# REMEMBER, in the Bi-directional Information model, this is going to be evaluated for log-probabilities alone.
# THIS IS STILL A SINGLE DIRECTION LSTM!!
# This still needs to be written separately from the normal sub-policy network(s) because it also requires termination probabilities.
# Must change forward pass back to using lstm() directly on the entire sequence rather than iterating.
# Now we have the whole input sequence beforehand.
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, input_size, hidden_size, number_subpolicies, number_layers=4, b_exploration_bias=0., batch_size=1):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(LatentPolicyNetwork, self).__init__()
# Input size is actually input_size + number_subpolicies +1
self.input_size = input_size+number_subpolicies+1
self.offset_for_z = input_size+1
self.hidden_size = hidden_size
self.number_subpolicies = number_subpolicies
self.output_size = number_subpolicies
self.num_layers = number_layers
self.b_exploration_bias = b_exploration_bias
self.batch_size = batch_size
# Define LSTM.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers).to(device)
# # Try initializing the network to something, so that we can escape the stupid constant output business.
for name, param in self.lstm.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.0)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=5)
# Transform to output space - Latent z and Latent b.
self.subpolicy_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.termination_output_layer = torch.nn.Linear(self.hidden_size,2)
# Sigmoid and Softmax activation functions for Bernoulli termination probability and latent z selection .
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
def forward(self, input):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_z_preprobabilities = self.subpolicy_output_layer(outputs)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
latent_z_probabilities = self.batch_softmax_layer(latent_z_preprobabilities).squeeze(1)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
latent_z_logprobabilities = self.batch_logsoftmax_layer(latent_z_preprobabilities).squeeze(1)
latent_b_logprobabilities = self.batch_logsoftmax_layer(latent_b_preprobabilities).squeeze(1)
# Return log probabilities.
return latent_z_logprobabilities, latent_b_logprobabilities, latent_b_probabilities, latent_z_probabilities
def get_actions(self, input, greedy=False):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_z_preprobabilities = self.subpolicy_output_layer(outputs)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
latent_z_probabilities = self.batch_softmax_layer(latent_z_preprobabilities).squeeze(1)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
if greedy==True:
selected_b = self.select_greedy_action(latent_b_probabilities)
selected_z = self.select_greedy_action(latent_z_probabilities)
else:
selected_b = self.sample_action(latent_b_probabilities)
selected_z = self.sample_action(latent_z_probabilities)
return selected_b, selected_z
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# NEED TO USE DIMENSION OF ARGMAX.
return action_probabilities.argmax(dim=-1)
class ContinuousLatentPolicyNetwork(PolicyNetwork_BaseClass):
# def __init__(self, input_size, hidden_size, z_dimensions, number_layers=4, b_exploration_bias=0., batch_size=1):
def __init__(self, input_size, hidden_size, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(ContinuousLatentPolicyNetwork, self).__init__()
self.args = args
# Input size is actually input_size + number_subpolicies +1
self.input_size = input_size+self.args.z_dimensions+1
self.offset_for_z = input_size+1
self.hidden_size = hidden_size
# self.number_subpolicies = number_subpolicies
self.output_size = self.args.z_dimensions
self.num_layers = number_layers
self.b_exploration_bias = self.args.b_exploration_bias
self.batch_size = self.args.batch_size
# Define LSTM.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers).to(device)
# Transform to output space - Latent z and Latent b.
# self.subpolicy_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.termination_output_layer = torch.nn.Linear(self.hidden_size,2)
# Sigmoid and Softmax activation functions for Bernoulli termination probability and latent z selection .
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
# # # Try initializing the network to something, so that we can escape the stupid constant output business.
for name, param in self.lstm.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.001)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=5)
# Also initializing mean_output_layer to something large...
for name, param in self.mean_output_layer.named_parameters():
if 'bias' in name:
torch.nn.init.constant_(param, 0.)
elif 'weight' in name:
torch.nn.init.xavier_normal_(param,gain=2)
def forward(self, input, epsilon=0.001):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_b_preprobabilities = self.termination_output_layer(outputs)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
latent_b_logprobabilities = self.batch_logsoftmax_layer(latent_b_preprobabilities).squeeze(1)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
if self.args.debug:
print("Embedding in Latent Policy.")
embed()
# Return log probabilities.
return latent_b_logprobabilities, latent_b_probabilities, self.dists
def get_actions(self, input, greedy=False, epsilon=0.001):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# We should be multiplying by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
if greedy==True:
selected_b = self.select_greedy_action(latent_b_probabilities)
selected_z = mean_outputs
else:
# selected_b = self.sample_action(latent_b_probabilities)
selected_b = self.select_greedy_action(latent_b_probabilities)
selected_z = self.dists.sample()
return selected_b, selected_z
def incremental_reparam_get_actions(self, input, greedy=False, action_epsilon=0.001, hidden=None, previous_z=None):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
outputs, hidden = self.lstm(format_input, hidden)
latent_b_preprobabilities = self.termination_output_layer(outputs)
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Greedily select b.
selected_b = self.select_greedy_action(latent_b_probabilities)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# We should be multiplying by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + action_epsilon
noise = torch.randn_like(variance_outputs)
if greedy:
selected_z = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
selected_z = mean_outputs + variance_outputs * noise
# If single input and previous_Z is None, this is the first timestep. So set b to 1, and don't do anything to z.
if input.shape[0]==1 and previous_z is None:
selected_b[0] = 1
# If previous_Z is not None, this is not the first timestep, so don't do anything to z. If b is 0, use previous.
elif input.shape[0]==1 and previous_z is not None:
if selected_b==0:
selected_z = previous_z
elif input.shape[0]>1:
# Now modify z's as per New Z Selection.
# Set initial b to 1.
selected_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if selected_b[t]==0:
selected_z[t] = selected_z[t-1]
return selected_z, selected_b, hidden
def reparam_get_actions(self, input, greedy=False, action_epsilon=0.001, hidden=None):
# Wraps incremental.
# MUST MODIFY INCREMENTAL ONE TO HANDLE NEW_Z_SELECTION (i.e. only choose a new z if b is 1).
# Not implemented yet; the intended new-z selection (sketch): set the initial b to 1, then for
# t in range(1, input.shape[0]), if sampled_b[t]==0 reuse sampled_z_index[t-1] instead of sampling.
raise NotImplementedError
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# NEED TO USE DIMENSION OF ARGMAX.
return action_probabilities.argmax(dim=-1)
class ContinuousLatentPolicyNetwork_ConstrainedBPrior(ContinuousLatentPolicyNetwork):
def __init__(self, input_size, hidden_size, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousLatentPolicyNetwork_ConstrainedBPrior, self).__init__(input_size, hidden_size, args, number_layers)
# We can inherit the forward function from the above class... we just need to modify get actions.
self.min_skill_time = 12
self.max_skill_time = 16
def get_prior_value(self, elapsed_t, max_limit=5):
skill_time_limit = max_limit-1
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
# If allowing variable skill length, set length for this sample.
if self.args.var_skill_length:
# Choose length of 12-16 with certain probabilities.
lens = np.array([12,13,14,15,16])
# probabilities = np.array([0.1,0.2,0.4,0.2,0.1])
prob_biases = np.array([[0.8,0.],[0.4,0.],[0.,0.],[0.,0.4]])
max_limit = 16
skill_time_limit = 12
else:
max_limit = 20
skill_time_limit = max_limit-1
prior_value = torch.zeros((1,2)).to(device).float()
# If at or over hard limit.
if elapsed_t>=max_limit:
prior_value[0,1]=1.
# If at or more than typical, less than hard limit:
elif elapsed_t>=skill_time_limit:
if self.args.var_skill_length:
prior_value[0] = torch.tensor(prob_biases[elapsed_t-skill_time_limit]).to(device).float()
else:
# Random
prior_value[0,1]=0.
# If less than typical.
else:
# Continue.
prior_value[0,0]=1.
return prior_value
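# Editorial note (not in the original source): with var_skill_length set (MIME/Roboturk-style data),
# the returned prior is a bias added to the [continue, terminate] pre-softmax logits as a function
# of elapsed_t: t < 12 -> [1.0, 0.0] (keep going), t = 12 -> [0.8, 0.0], t = 13 -> [0.4, 0.0],
# t = 14 -> [0.0, 0.0], t = 15 -> [0.0, 0.4], and t >= 16 -> [0.0, 1.0]. get_actions below
# additionally hard-clamps b to 0 before min_skill_time (12) and to 1 at or beyond max_skill_time (16).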
def get_actions(self, input, greedy=False, epsilon=0.001, delta_t=0):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
latent_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# We should be multiplying by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
############################################
prior_value = self.get_prior_value(delta_t)
# Now... add prior value.
# Only need to do this to the last timestep... because the last sampled b is going to be copied into a different variable that is stored.
latent_b_preprobabilities[-1, :, :] += prior_value
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Sample b.
selected_b = self.select_greedy_action(latent_b_probabilities)
############################################
# Now implementing hard constrained b selection.
if delta_t < self.min_skill_time:
# Continue. Set b to 0.
selected_b[-1] = 0.
elif (self.min_skill_time <= delta_t) and (delta_t < self.max_skill_time):
pass
else:
# Stop and select a new z. Set b to 1.
selected_b[-1] = 1.
# Also get z... assume higher level function handles the new z selection component.
if greedy==True:
selected_z = mean_outputs
else:
selected_z = self.dists.sample()
return selected_b, selected_z
def incremental_reparam_get_actions(self, input, greedy=False, action_epsilon=0.001, hidden=None, previous_z=None, delta_t=0):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
outputs, hidden = self.lstm(format_input, hidden)
latent_b_preprobabilities = self.termination_output_layer(outputs)
############################################
# GET PRIOR AND ADD.
prior_value = self.get_prior_value(delta_t)
latent_b_preprobabilities[-1, :, :] += prior_value
############################################
latent_b_probabilities = self.batch_softmax_layer(latent_b_preprobabilities).squeeze(1)
# Greedily select b.
selected_b = self.select_greedy_action(latent_b_probabilities)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# We should be multiplying by self.variance_factor.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + action_epsilon
noise = torch.randn_like(variance_outputs)
if greedy:
selected_z = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
selected_z = mean_outputs + variance_outputs * noise
# If single input and previous_Z is None, this is the first timestep. So set b to 1, and don't do anything to z.
if input.shape[0]==1 and previous_z is None:
selected_b[0] = 1
# If previous_Z is not None, this is not the first timestep, so don't do anything to z. If b is 0, use previous.
elif input.shape[0]==1 and previous_z is not None:
if selected_b==0:
selected_z = previous_z
elif input.shape[0]>1:
# Now modify z's as per New Z Selection.
# Set initial b to 1.
selected_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if selected_b[t]==0:
selected_z[t] = selected_z[t-1]
return selected_z, selected_b, hidden
class VariationalPolicyNetwork(PolicyNetwork_BaseClass):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
# def __init__(self, input_size, hidden_size, number_subpolicies, number_layers=4, z_exploration_bias=0., b_exploration_bias=0., batch_size=1):
def __init__(self, input_size, hidden_size, number_subpolicies, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(VariationalPolicyNetwork, self).__init__()
self.args = args
self.input_size = input_size
self.hidden_size = hidden_size
self.number_subpolicies = number_subpolicies
self.output_size = number_subpolicies
self.num_layers = number_layers
self.z_exploration_bias = self.args.z_exploration_bias
self.b_exploration_bias = self.args.b_exploration_bias
self.z_probability_factor = self.args.z_probability_factor
self.b_probability_factor = self.args.b_probability_factor
self.batch_size = self.args.batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Transform to output space - Latent z and Latent b.
# THIS OUTPUT LAYER TAKES 2*HIDDEN SIZE as input because it's bidirectional.
self.subpolicy_output_layer = torch.nn.Linear(2*self.hidden_size,self.output_size)
self.termination_output_layer = torch.nn.Linear(2*self.hidden_size,2)
# Softmax activation functions for Bernoulli termination probability and latent z selection .
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
def sample_latent_variables(self, subpolicy_outputs, termination_output_layer):
# Run sampling layers.
sample_z = self.sample_action(subpolicy_outputs)
sample_b = self.sample_action(termination_output_layer)
return sample_z, sample_b
def sample_latent_variables_epsilon_greedy(self, subpolicy_outputs, termination_output_layer, epsilon):
sample_z = self.select_epsilon_greedy_action(subpolicy_outputs, epsilon)
sample_b = self.select_epsilon_greedy_action(termination_output_layer, epsilon)
return sample_z, sample_b
def forward(self, input, epsilon, new_z_selection=True):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_z_preprobabilities = self.subpolicy_output_layer(outputs)*self.z_probability_factor + self.z_exploration_bias
# variational_b_preprobabilities = self.termination_output_layer(outputs) + self.b_exploration_bias
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Add b continuation bias to the continuing option at every timestep.
variational_b_preprobabilities[:,0,0] += self.b_exploration_bias
variational_z_probabilities = self.batch_softmax_layer(variational_z_preprobabilities).squeeze(1)
variational_b_probabilities = self.batch_softmax_layer(variational_b_preprobabilities).squeeze(1)
variational_z_logprobabilities = self.batch_logsoftmax_layer(variational_z_preprobabilities).squeeze(1)
variational_b_logprobabilities = self.batch_logsoftmax_layer(variational_b_preprobabilities).squeeze(1)
# sampled_z_index, sampled_b = self.sample_latent_variables(variational_z_probabilities, variational_b_probabilities)
sampled_z_index, sampled_b = self.sample_latent_variables_epsilon_greedy(variational_z_probabilities, variational_b_probabilities, epsilon)
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# # Trying cheeky thing to see if we can learn in this setting.
# sampled_b[1:] = 0
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
return sampled_z_index, sampled_b, variational_b_logprobabilities,\
variational_z_logprobabilities, variational_b_probabilities, variational_z_probabilities, None
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
# Sampling can handle batched action_probabilities.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample()
return sample_action
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# NEED TO USE DIMENSION OF ARGMAX.
return action_probabilities.argmax(dim=-1)
def select_epsilon_greedy_action(self, action_probabilities, epsilon=0.1):
epsilon = epsilon
# if np.random.random()<epsilon:
# # return(np.random.randint(0,high=len(action_probabilities)))
# return self.sample_action(action_probabilities)
# else:
# return self.select_greedy_action(action_probabilities)
# Issue with the current implementation is that it selects either sampling or greedy selection identically across the entire batch.
# This is stupid, use a torch.where instead?
# Sample an array of binary variables of size = batch size.
# For each, use greedy or ...
whether_greedy = torch.rand(action_probabilities.shape[0]).to(device)
sample_actions = torch.where(whether_greedy<epsilon, self.sample_action(action_probabilities), self.select_greedy_action(action_probabilities))
return sample_actions
def sample_termination(self, termination_probability):
sample_terminal = torch.distributions.Bernoulli(termination_probability).sample().squeeze(0)
return sample_terminal
class ContinuousVariationalPolicyNetwork(PolicyNetwork_BaseClass):
# def __init__(self, input_size, hidden_size, z_dimensions, number_layers=4, z_exploration_bias=0., b_exploration_bias=0., batch_size=1):
def __init__(self, input_size, hidden_size, z_dimensions, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
# super().__init__()
super(ContinuousVariationalPolicyNetwork, self).__init__()
self.args = args
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = z_dimensions
self.num_layers = number_layers
self.z_exploration_bias = self.args.z_exploration_bias
self.b_exploration_bias = self.args.b_exploration_bias
self.z_probability_factor = self.args.z_probability_factor
self.b_probability_factor = self.args.b_probability_factor
self.batch_size = self.args.batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Transform to output space - Latent z and Latent b.
# THIS OUTPUT LAYER TAKES 2*HIDDEN SIZE as input because it's bidirectional.
self.termination_output_layer = torch.nn.Linear(2*self.hidden_size,2)
# Softmax activation functions for Bernoulli termination probability and latent z selection .
self.batch_softmax_layer = torch.nn.Softmax(dim=-1)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=-1)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(2*self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
def forward(self, input, epsilon, new_z_selection=True, var_epsilon=0.001):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Add b continuation bias to the continuing option at every timestep.
variational_b_preprobabilities[:,0,0] += self.b_exploration_bias
variational_b_probabilities = self.batch_softmax_layer(variational_b_preprobabilities).squeeze(1)
variational_b_logprobabilities = self.batch_logsoftmax_layer(variational_b_preprobabilities).squeeze(1)
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Still need a softplus activation for variance because needs to be positive.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + var_epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
sampled_b = self.select_epsilon_greedy_action(variational_b_probabilities, epsilon)
if epsilon==0.:
sampled_z_index = mean_outputs.squeeze(1)
else:
# Whether to use reparametrization trick to retrieve the latent_z's.
if self.args.reparam:
if self.args.train:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
sampled_z_index = mean_outputs + variance_outputs*noise
# Ought to be able to pass gradients through this latent_z now.
sampled_z_index = sampled_z_index.squeeze(1)
# If evaluating, greedily get action.
else:
sampled_z_index = mean_outputs.squeeze(1)
else:
sampled_z_index = self.dists.sample().squeeze(1)
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
# Also compute logprobabilities of the latent_z's sampled from this net.
variational_z_logprobabilities = self.dists.log_prob(sampled_z_index.unsqueeze(1))
variational_z_probabilities = None
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(self.dists, standard_distribution)
# Prior loglikelihood
prior_loglikelihood = standard_distribution.log_prob(sampled_z_index)
# if self.args.debug:
# print("#################################")
# print("Embedding in Variational Network.")
# embed()
return sampled_z_index, sampled_b, variational_b_logprobabilities,\
variational_z_logprobabilities, variational_b_probabilities, variational_z_probabilities, kl_divergence, prior_loglikelihood
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
# Sampling can handle batched action_probabilities.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample()
return sample_action
def select_greedy_action(self, action_probabilities):
# Select action with max probability for test time.
# NEED TO USE DIMENSION OF ARGMAX.
return action_probabilities.argmax(dim=-1)
def select_epsilon_greedy_action(self, action_probabilities, epsilon=0.1):
epsilon = epsilon
# if np.random.random()<epsilon:
# # return(np.random.randint(0,high=len(action_probabilities)))
# return self.sample_action(action_probabilities)
# else:
# return self.select_greedy_action(action_probabilities)
# Issue with the current implementation is that it selects either sampling or greedy selection identically across the entire batch.
# This is stupid, use a torch.where instead?
# Sample an array of binary variables of size = batch size.
# For each, use greedy or ...
whether_greedy = torch.rand(action_probabilities.shape[0]).to(device)
sample_actions = torch.where(whether_greedy<epsilon, self.sample_action(action_probabilities), self.select_greedy_action(action_probabilities))
return sample_actions
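# Editorial sketch (not in the original file): forward() above regularizes the variational
# posterior against a standard normal prior. For a diagonal Gaussian, the KL term returned by
# torch.distributions.kl_divergence matches the usual closed form
# 0.5 * sum(var + mu^2 - 1 - log(var)); a quick numerical check with stand-in values:
if __name__ == '__main__':
    import torch
    mu, var = torch.tensor([0.5, -1.0]), torch.tensor([0.25, 2.0])
    p = torch.distributions.MultivariateNormal(mu, torch.diag_embed(var))
    q = torch.distributions.MultivariateNormal(torch.zeros(2), torch.eye(2))
    kl = torch.distributions.kl_divergence(p, q)
    closed_form = 0.5 * (var + mu ** 2 - 1.0 - torch.log(var)).sum()
    print(kl.item(), closed_form.item())   # the two values agree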
class ContinuousVariationalPolicyNetwork_BPrior(ContinuousVariationalPolicyNetwork):
def __init__(self, input_size, hidden_size, z_dimensions, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousVariationalPolicyNetwork_BPrior, self).__init__(input_size, hidden_size, z_dimensions, args, number_layers)
def get_prior_value(self, elapsed_t, max_limit=5):
skill_time_limit = max_limit-1
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
# If allowing variable skill length, set length for this sample.
if self.args.var_skill_length:
# Choose length of 12-16 with certain probabilities.
lens = np.array([12,13,14,15,16])
# probabilities = np.array([0.1,0.2,0.4,0.2,0.1])
prob_biases = np.array([[0.8,0.],[0.4,0.],[0.,0.],[0.,0.4]])
max_limit = 16
skill_time_limit = 12
else:
max_limit = 20
skill_time_limit = max_limit-1
prior_value = torch.zeros((1,2)).to(device).float()
# If at or over hard limit.
if elapsed_t>=max_limit:
prior_value[0,1]=1.
# If at or more than typical, less than hard limit:
elif elapsed_t>=skill_time_limit:
if self.args.var_skill_length:
prior_value[0] = torch.tensor(prob_biases[elapsed_t-skill_time_limit]).to(device).float()
else:
# No bias; leave the prior uniform (it is already zeros).
prior_value[0,1]=0.
# If less than typical.
else:
# Continue.
prior_value[0,0]=1.
return prior_value
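# Illustrative note (comment-only sketch): the prior value returned above is added to the termination
# pre-probabilities before the softmax, so it acts as a logit bias. For example, with
# pre-probabilities [0., 0.] and prior_value [0., 1.] (at or over the hard limit),
# softmax([0., 1.]) is roughly [0.27, 0.73], biasing b towards 1; with prior_value [1., 0.]
# (below the typical skill length), b is biased towards 0, i.e. towards continuing the current skill.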
def forward(self, input, epsilon, new_z_selection=True):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Still need a softplus activation for the variance because it needs to be positive.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
prev_time = 0
# Create variables for prior and probs.
prior_values = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_probabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_logprobabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
sampled_b = torch.zeros(input.shape[0]).to(device).int()
sampled_b[0] = 1
for t in range(1,input.shape[0]):
# Compute prior value.
delta_t = t-prev_time
# if self.args.debug:
# print("##########################")
# print("Time: ",t, " Prev Time:",prev_time, " Delta T:",delta_t)
prior_values[t] = self.get_prior_value(delta_t, max_limit=self.args.skill_length)
# Construct probabilities.
variational_b_probabilities[t,0,:] = self.batch_softmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
variational_b_logprobabilities[t,0,:] = self.batch_logsoftmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
sampled_b[t] = self.select_epsilon_greedy_action(variational_b_probabilities[t:t+1], epsilon)
if sampled_b[t]==1:
prev_time = t
# if self.args.debug:
# print("Sampled b:",sampled_b[t])
if epsilon==0.:
sampled_z_index = mean_outputs.squeeze(1)
else:
# Whether to use reparametrization trick to retrieve the latent_z's.
if self.args.reparam:
if self.args.train:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
sampled_z_index = mean_outputs + variance_outputs*noise
# Ought to be able to pass gradients through this latent_z now.
sampled_z_index = sampled_z_index.squeeze(1)
# If evaluating, greedily get action.
else:
sampled_z_index = mean_outputs.squeeze(1)
else:
sampled_z_index = self.dists.sample().squeeze(1)
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
# Also compute logprobabilities of the latent_z's sampled from this net.
variational_z_logprobabilities = self.dists.log_prob(sampled_z_index.unsqueeze(1))
variational_z_probabilities = None
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(self.dists, standard_distribution)
# Prior loglikelihood
prior_loglikelihood = standard_distribution.log_prob(sampled_z_index)
if self.args.debug:
print("#################################")
print("Embedding in Variational Network.")
embed()
return sampled_z_index, sampled_b, variational_b_logprobabilities.squeeze(1), \
variational_z_logprobabilities, variational_b_probabilities.squeeze(1), variational_z_probabilities, kl_divergence, prior_loglikelihood
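# A minimal sketch (illustrative only; not called by the classes in this file) of the reparameterized
# sampling used in the forward pass above: instead of sampling z from the predicted Gaussian directly,
# draw eps ~ N(0, I) and construct z = mu + scale * eps, which keeps z differentiable with respect to
# the network outputs. As in the code above, the softplus "variance" head is used directly as the scale.
def _example_reparameterized_sample(mean_outputs, variance_outputs):
	import torch
	noise = torch.randn_like(variance_outputs)
	return mean_outputs + variance_outputs*noise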
class ContinuousVariationalPolicyNetwork_ConstrainedBPrior(ContinuousVariationalPolicyNetwork_BPrior):
def __init__(self, input_size, hidden_size, z_dimensions, args, number_layers=4):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousVariationalPolicyNetwork_ConstrainedBPrior, self).__init__(input_size, hidden_size, z_dimensions, args, number_layers)
self.min_skill_time = 12
self.max_skill_time = 16
def forward(self, input, epsilon, new_z_selection=True):
# Input Format must be: Sequence_Length x 1 x Input_Size.
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
outputs, hidden = self.lstm(format_input)
# Damping factor for probabilities to prevent washing out of bias.
variational_b_preprobabilities = self.termination_output_layer(outputs)*self.b_probability_factor
# Predict Gaussian means and variances.
if self.args.mean_nonlinearity:
mean_outputs = self.activation_layer(self.mean_output_layer(outputs))
else:
mean_outputs = self.mean_output_layer(outputs)
# Still need a softplus activation for the variance because it needs to be positive.
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(outputs))+self.variance_activation_bias) + epsilon
# This should be a SET of distributions.
self.dists = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
# Create variables for prior and probabilities.
prior_values = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_probabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
variational_b_logprobabilities = torch.zeros_like(variational_b_preprobabilities).to(device).float()
#######################################
################ Set B ################
#######################################
# Set the first b to 1, and the time b was == 1.
sampled_b = torch.zeros(input.shape[0]).to(device).int()
sampled_b[0] = 1
prev_time = 0
for t in range(1,input.shape[0]):
# Compute time since the last b occurred.
delta_t = t-prev_time
# Compute prior value.
prior_values[t] = self.get_prior_value(delta_t, max_limit=self.args.skill_length)
# Construct probabilities.
variational_b_probabilities[t,0,:] = self.batch_softmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
variational_b_logprobabilities[t,0,:] = self.batch_logsoftmax_layer(variational_b_preprobabilities[t,0] + prior_values[t,0])
# Now Implement Hard Restriction on Selection of B's.
if delta_t < self.min_skill_time:
# Set B to 0. I.e. Continue.
# variational_b_probabilities[t,0,:] = variational_b_probabilities[t,0,:]*0
# variational_b_probabilities[t,0,0] += 1
sampled_b[t] = 0.
elif (self.min_skill_time <= delta_t) and (delta_t < self.max_skill_time):
# Sample b.
sampled_b[t] = self.select_epsilon_greedy_action(variational_b_probabilities[t:t+1], epsilon)
elif self.max_skill_time <= delta_t:
# Set B to 1. I.e. select new z.
sampled_b[t] = 1.
# If b is 1, set the previous time to now.
if sampled_b[t]==1:
prev_time = t
#######################################
################ Set Z ################
#######################################
# Now set the z's. If greedy, just return the means.
if epsilon==0.:
sampled_z_index = mean_outputs.squeeze(1)
# If not greedy, then reparameterize.
else:
# Whether to use reparametrization trick to retrieve the latent_z's.
if self.args.train:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
sampled_z_index = mean_outputs + variance_outputs*noise
# Ought to be able to pass gradients through this latent_z now.
sampled_z_index = sampled_z_index.squeeze(1)
# If evaluating, greedily get action.
else:
sampled_z_index = mean_outputs.squeeze(1)
# Modify z's based on whether b was 1 or 0. This part should remain the same.
if new_z_selection:
# Set initial b to 1.
sampled_b[0] = 1
# Initial z is already trivially set.
for t in range(1,input.shape[0]):
# If b_t==0, just use previous z.
# If b_t==1, sample new z. Here, we've cloned this from sampled_z's, so there's no need to do anything.
if sampled_b[t]==0:
sampled_z_index[t] = sampled_z_index[t-1]
# Also compute logprobabilities of the latent_z's sampled from this net.
variational_z_logprobabilities = self.dists.log_prob(sampled_z_index.unsqueeze(1))
variational_z_probabilities = None
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(self.dists, standard_distribution)
# Prior loglikelihood
prior_loglikelihood = standard_distribution.log_prob(sampled_z_index)
if self.args.debug:
print("#################################")
print("Embedding in Variational Network.")
embed()
return sampled_z_index, sampled_b, variational_b_logprobabilities.squeeze(1), \
variational_z_logprobabilities, variational_b_probabilities.squeeze(1), variational_z_probabilities, kl_divergence, prior_loglikelihood
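# A small self-contained sketch (assumed shapes; not used by this module) of the KL computation above:
# KL divergence between the diagonal Gaussian built from predicted means / variances and a standard
# normal prior of the same dimensionality, broadcast over any leading time / batch dimensions.
def _example_kl_to_standard_normal(mean_outputs, variance_outputs):
	import torch
	predicted_distribution = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
	z_dimensions = mean_outputs.shape[-1]
	standard_normal = torch.distributions.MultivariateNormal(torch.zeros(z_dimensions).to(mean_outputs.device), torch.eye(z_dimensions).to(mean_outputs.device))
	return torch.distributions.kl_divergence(predicted_distribution, standard_normal)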
class EncoderNetwork(PolicyNetwork_BaseClass):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, input_size, hidden_size, output_size, number_subpolicies=4, batch_size=1):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(EncoderNetwork, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.number_subpolicies = number_subpolicies
self.num_layers = 5
self.batch_size = batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Define output layers for the LSTM, and activations for this output layer.
# Because it's bidirectional, once we compute <outputs, hidden = self.lstm(input)>, we must concatenate:
# From reverse LSTM: <outputs[0,:,hidden_size:]> and from the forward LSTM: <outputs[-1,:,:hidden_size]>.
# (Refer - https://towardsdatascience.com/understanding-bidirectional-rnn-in-pytorch-5bd25a5dd66 )
# Because of this, the output layer must take in size 2*hidden.
self.hidden_layer = torch.nn.Linear(2*self.hidden_size, 2*self.hidden_size)
self.output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
# Softmax and log-softmax activation functions for latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
def forward(self, input, epsilon):
# Input format must be: Sequence_Length x 1 x Input_Size.
# The input is expected to be a torch tensor (not a numpy array).
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input)
concatenated_outputs = torch.cat([outputs[0,:,self.hidden_size:],outputs[-1,:,:self.hidden_size]],dim=-1).view((1,1,-1))
# Calculate preprobs.
preprobabilities = self.output_layer(self.hidden_layer(concatenated_outputs))
probabilities = self.batch_softmax_layer(preprobabilities)
logprobabilities = self.batch_logsoftmax_layer(preprobabilities)
latent_z = self.select_epsilon_greedy_action(probabilities, epsilon=epsilon)
# Return latentz_encoding as output layer of last outputs.
return latent_z, logprobabilities, None, None
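# A minimal runnable sketch (illustrative only; dimensions are arbitrary) of the bidirectional LSTM
# readout used above: concatenate the reverse direction's output at the first timestep with the
# forward direction's output at the last timestep to get a single 2*hidden_size summary vector.
def _example_bidirectional_readout(sequence_length=10, batch_size=1, input_size=4, hidden_size=8):
	import torch
	lstm = torch.nn.LSTM(input_size=input_size, hidden_size=hidden_size, bidirectional=True)
	inputs = torch.randn(sequence_length, batch_size, input_size)
	outputs, hidden = lstm(inputs)
	# outputs has shape (sequence_length, batch_size, 2*hidden_size).
	summary = torch.cat([outputs[0,:,hidden_size:], outputs[-1,:,:hidden_size]], dim=-1)
	# summary has shape (batch_size, 2*hidden_size).
	return summary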
class ContinuousEncoderNetwork(PolicyNetwork_BaseClass):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, input_size, hidden_size, output_size, args, batch_size=1):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(ContinuousEncoderNetwork, self).__init__()
self.args = args
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = 5
self.batch_size = batch_size
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers, bidirectional=True)
# Define output layers for the LSTM, and activations for this output layer.
# # Because it's bidirectional, once we compute <outputs, hidden = self.lstm(input)>, we must concatenate:
# # From reverse LSTM: <outputs[0,:,hidden_size:]> and from the forward LSTM: <outputs[-1,:,:hidden_size]>.
# # (Refer - https://towardsdatascience.com/understanding-bidirectional-rnn-in-pytorch-5bd25a5dd66 )
# # Because of this, the output layer must take in size 2*hidden.
# self.hidden_layer = torch.nn.Linear(2*self.hidden_size, self.hidden_size)
# self.output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
# Softmax and log-softmax activation functions for latent z selection.
self.batch_softmax_layer = torch.nn.Softmax(dim=2)
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
# Define output layers for the LSTM, and activations for this output layer.
self.mean_output_layer = torch.nn.Linear(2*self.hidden_size,self.output_size)
self.variances_output_layer = torch.nn.Linear(2*self.hidden_size, self.output_size)
self.activation_layer = torch.nn.Tanh()
self.variance_activation_layer = torch.nn.Softplus()
self.variance_activation_bias = 0.
self.variance_factor = 0.01
def forward(self, input, epsilon=0.001, z_sample_to_evaluate=None):
# This epsilon passed as an argument is just so that the signature of this function is the same as what's provided to the discrete encoder network.
# Input format must be: Sequence_Length x 1 x Input_Size.
# The input is expected to be a torch tensor (not a numpy array).
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input)
concatenated_outputs = torch.cat([outputs[0,:,self.hidden_size:],outputs[-1,:,:self.hidden_size]],dim=-1).view((1,1,-1))
# Predict Gaussian means and variances.
# if self.args.mean_nonlinearity:
# mean_outputs = self.activation_layer(self.mean_output_layer(concatenated_outputs))
# else:
mean_outputs = self.mean_output_layer(concatenated_outputs)
variance_outputs = self.variance_factor*(self.variance_activation_layer(self.variances_output_layer(concatenated_outputs))+self.variance_activation_bias) + epsilon
dist = torch.distributions.MultivariateNormal(mean_outputs, torch.diag_embed(variance_outputs))
# Whether to use the reparameterization trick to retrieve the latent z.
if self.args.reparam:
noise = torch.randn_like(variance_outputs)
# Instead of *sampling* the latent z from a distribution, construct using mu + sig * eps (random noise).
latent_z = mean_outputs + variance_outputs * noise
# Ought to be able to pass gradients through this latent_z now.
else:
# Retrieve sample from the distribution as the value of the latent variable.
latent_z = dist.sample()
# calculate entropy for training.
entropy = dist.entropy()
# Also retrieve log probability of the same.
logprobability = dist.log_prob(latent_z)
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).to(device),torch.eye((self.output_size)).to(device))
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(dist, standard_distribution)
if self.args.debug:
print("###############################")
print("Embedding in Encoder Network.")
embed()
if z_sample_to_evaluate is None:
return latent_z, logprobability, entropy, kl_divergence
else:
logprobability = dist.log_prob(z_sample_to_evaluate)
return logprobability
class CriticNetwork(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(CriticNetwork, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.number_layers = number_layers
self.batch_size = 1
# Create LSTM Network.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.number_layers)
self.output_layer = torch.nn.Linear(self.hidden_size,self.output_size)
def forward(self, input):
format_input = input.view((input.shape[0], self.batch_size, self.input_size))
hidden = None
lstm_outputs, hidden = self.lstm(format_input)
# Predict critic value for each timestep.
critic_value = self.output_layer(lstm_outputs)
return critic_value
class ContinuousMLP(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(ContinuousMLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.input_layer = torch.nn.Linear(self.input_size, self.hidden_size)
self.hidden_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.relu_activation = torch.nn.ReLU()
self.variance_activation_layer = torch.nn.Softplus()
def forward(self, input, greedy=False, action_epsilon=0.0001):
# Assumes input is Batch_Size x Input_Size.
h1 = self.relu_activation(self.input_layer(input))
h2 = self.relu_activation(self.hidden_layer(h1))
h3 = self.relu_activation(self.hidden_layer(h2))
h4 = self.relu_activation(self.hidden_layer(h3))
mean_outputs = self.output_layer(h4)
variance_outputs = self.variance_activation_layer(self.output_layer(h4))
noise = torch.randn_like(variance_outputs)
if greedy:
action = mean_outputs
else:
# Instead of *sampling* the action from a distribution, construct using mu + sig * eps (random noise).
action = mean_outputs + variance_outputs * noise
return action
def reparameterized_get_actions(self, input, greedy=False, action_epsilon=0.0001):
return self.forward(input, greedy, action_epsilon)
class CriticMLP(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(CriticMLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.batch_size = 1
self.input_layer = torch.nn.Linear(self.input_size, self.hidden_size)
self.hidden_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.relu_activation = torch.nn.ReLU()
def forward(self, input):
# Assumes input is Batch_Size x Input_Size.
h1 = self.relu_activation(self.input_layer(input))
h2 = self.relu_activation(self.hidden_layer(h1))
h3 = self.relu_activation(self.hidden_layer(h2))
h4 = self.relu_activation(self.hidden_layer(h3))
# Predict critic value for each timestep.
critic_value = self.output_layer(h4)
return critic_value
class DiscreteMLP(torch.nn.Module):
def __init__(self, input_size, hidden_size, output_size, args=None, number_layers=4):
super(DiscreteMLP, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.input_layer = torch.nn.Linear(self.input_size, self.hidden_size)
self.hidden_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
self.relu_activation = torch.nn.ReLU()
self.batch_logsoftmax_layer = torch.nn.LogSoftmax(dim=2)
self.batch_softmax_layer = torch.nn.Softmax(dim=2 )
def forward(self, input):
# Assumes input is Batch_Size x Input_Size.
h1 = self.relu_activation(self.input_layer(input))
h2 = self.relu_activation(self.hidden_layer(h1))
h3 = self.relu_activation(self.hidden_layer(h2))
h4 = self.relu_activation(self.hidden_layer(h3))
# Compute preprobability with output layer.
preprobability_outputs = self.output_layer(h4)
# Compute probabilities and logprobabilities.
log_probabilities = self.batch_logsoftmax_layer(preprobability_outputs)
probabilities = self.batch_softmax_layer(preprobability_outputs)
return log_probabilities, probabilities
| CausalSkillLearning-main | Experiments/PolicyNetworks.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import mocap_processing, glob, numpy as np, os
from mocap_processing.motion.pfnn import Animation, BVH
from IPython import embed
# Define function that loads global and local positions, and the rotations, from a datafile.
def load_animation_data(bvh_filename):
animation, joint_names, time_per_frame = BVH.load(bvh_filename)
global_positions = Animation.positions_global(animation)
# return global_positions, joint_parents, time_per_frame
return global_positions, animation.positions, animation.rotations, animation
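# Example usage (comment-only sketch; the path below is hypothetical):
# global_positions, local_positions, local_rotations, animation = load_animation_data("/path/to/motion.bvh")
# global_positions is a per-frame array of 3D joint positions, and local_rotations.qs holds the
# per-joint quaternions that are stored in the data elements below.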
# Set directory.
directory = "/checkpoint/dgopinath/amass/CMU"
save_directory = "/checkpoint/tanmayshankar/Mocap"
# Get file list.
filelist = glob.glob(os.path.join(directory,"*/*.bvh"))
demo_list = []
print("Starting to preprocess data.")
for i in range(len(filelist)):
print("Processing file number: ",i, " of ",len(filelist))
# Get filename.
filename = os.path.join(directory, filelist[i])
# Actually load file.
global_positions, local_positions, local_rotations, animation = load_animation_data(filename)
# Create data element object.
data_element = {}
data_element['global_positions'] = global_positions
data_element['local_positions'] = local_positions
# Get quaternion as array.
data_element['local_rotations'] = local_rotations.qs
data_element['animation'] = animation
demo_list.append(data_element)
demo_array = np.array(demo_list)
np.save(os.path.join(save_directory,"Demo_Array.npy"),demo_array)
| CausalSkillLearning-main | Experiments/Processing_MocapData.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class GridWorldDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
def __len__(self):
# Find out how many images we've stored.
filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
# FOR NOW: Use only the first 3200 images.
return 3200
# return len(filelist)
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = cv2.imread(os.path.join(self.dataset_directory,"Image{0}.png".format(index)))
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Image{0}_Traj1.npy".format(index))).astype(float)
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence
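# A standalone sketch (illustrative only; not used by the dataset classes here) of the action parsing
# convention in parse_trajectory_actions: consecutive coordinate differences are matched against the
# 8-connected action_map to recover discrete action indices.
def _example_parse_actions(coordinate_trajectory):
	import numpy as np
	action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
	state_diffs = np.diff(coordinate_trajectory, axis=0)
	action_sequence = np.zeros(len(state_diffs), dtype=int)
	for i, diff in enumerate(state_diffs):
		for k, action in enumerate(action_map):
			if (diff==action).all():
				action_sequence[i] = k
	return action_sequence
# For example, _example_parse_actions(np.array([[0,0],[1,0],[1,1]])) returns array([1, 3]).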
class SmallMapsDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
def __len__(self):
# Find out how many images we've stored.
filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
return 4000
# return len(filelist)
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = np.load(os.path.join(self.dataset_directory,"Map{0}.npy".format(index)))
time_limit = 20
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Map{0}_Traj1.npy".format(index))).astype(float)[:time_limit]
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence
class ToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_actions.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_actions.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
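# A hedged usage sketch (illustrative only; the directory is hypothetical and must contain the .npy
# files that ToyDataset expects): wrapping one of the datasets above in a torch DataLoader for
# minibatched iteration.
def _example_toy_dataloader(dataset_directory="/path/to/toy_data", batch_size=32):
	import torch
	dataset = ToyDataset(dataset_directory)
	loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
	trajectories, actions = next(iter(loader))
	# trajectories and actions are stacked into (batch_size, ...) tensors by the default collate function.
	return trajectories, actions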
class ContinuousToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_continuous.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_continuous.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_array_continuous.npy")
self.b_path = os.path.join(self.dataset_directory,"B_array_continuous.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class ContinuousDirectedToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_directed_continuous.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_directed_continuous.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_array_directed_continuous.npy")
self.b_path = os.path.join(self.dataset_directory,"B_array_directed_continuous.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class ContinuousNonZeroToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_array_continuous_nonzero.npy")
self.a_path = os.path.join(self.dataset_directory,"A_array_continuous_nonzero.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_array_continuous_nonzero.npy")
self.b_path = os.path.join(self.dataset_directory,"B_array_continuous_nonzero.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class ContinuousDirectedNonZeroToyDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_dir_cont_nonzero.npy")
self.a_path = os.path.join(self.dataset_directory,"A_dir_cont_nonzero.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_dir_cont_nonzero.npy")
self.b_path = os.path.join(self.dataset_directory,"B_dir_cont_nonzero.npy")
self.g_path = os.path.join(self.dataset_directory,"G_dir_cont_nonzero.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
class GoalDirectedDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_goal_directed.npy")
self.a_path = os.path.join(self.dataset_directory,"A_goal_directed.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_goal_directed.npy")
self.b_path = os.path.join(self.dataset_directory,"B_goal_directed.npy")
self.g_path = os.path.join(self.dataset_directory,"G_goal_directed.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
def get_goal(self, index):
return self.G_array[index]
class DeterministicGoalDirectedDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_deter_goal_directed.npy")
self.a_path = os.path.join(self.dataset_directory,"A_deter_goal_directed.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_deter_goal_directed.npy")
self.b_path = os.path.join(self.dataset_directory,"B_deter_goal_directed.npy")
self.g_path = os.path.join(self.dataset_directory,"G_deter_goal_directed.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
self.goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
def get_goal(self, index):
return self.G_array[index]
def get_goal_position(self, index):
return self.goal_states[self.G_array[index]]
class SeparableDataset(Dataset):
# Class implementing instance of dataset class for toy data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.x_path = os.path.join(self.dataset_directory,"X_separable.npy")
self.a_path = os.path.join(self.dataset_directory,"A_separable.npy")
self.y_path = os.path.join(self.dataset_directory,"Y_separable.npy")
self.b_path = os.path.join(self.dataset_directory,"B_separable.npy")
self.g_path = os.path.join(self.dataset_directory,"G_separable.npy")
self.s_path = os.path.join(self.dataset_directory,"StartConfig_separable.npy")
self.X_array = np.load(self.x_path)
self.A_array = np.load(self.a_path)
self.Y_array = np.load(self.y_path)
self.B_array = np.load(self.b_path)
self.G_array = np.load(self.g_path)
self.S_array = np.load(self.s_path)
def __len__(self):
return 50000
def __getitem__(self, index):
# Return trajectory and action sequence.
return self.X_array[index],self.A_array[index]
def get_latent_variables(self, index):
return self.B_array[index],self.Y_array[index]
def get_goal(self, index):
return self.G_array[index]
def get_startconfig(self, index):
return self.S_array[index]
| CausalSkillLearning-main | Experiments/DataLoaders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
from PolicyNetworks import *
from Visualizers import BaxterVisualizer, SawyerVisualizer, ToyDataVisualizer #, MocapVisualizer
import TFLogger, DMP, RLUtils
# Check if CUDA is available, set device to GPU if it is, otherwise use CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class PolicyManager_BaseClass():
def __init__(self):
super(PolicyManager_BaseClass, self).__init__()
def setup(self):
# Fixing seeds.
np.random.seed(seed=0)
torch.manual_seed(0)
np.set_printoptions(suppress=True,precision=2)
self.create_networks()
self.create_training_ops()
# self.create_util_ops()
# self.initialize_gt_subpolicies()
if self.args.setting=='imitation':
extent = self.dataset.get_number_task_demos(self.demo_task_index)
if (self.args.setting=='transfer' and isinstance(self, PolicyManager_Transfer)) or \
(self.args.setting=='cycle_transfer' and isinstance(self, PolicyManager_CycleConsistencyTransfer)):
extent = self.extent
else:
extent = len(self.dataset)-self.test_set_size
self.index_list = np.arange(0,extent)
self.initialize_plots()
def initialize_plots(self):
if self.args.name is not None:
logdir = os.path.join(self.args.logdir, self.args.name)
if not(os.path.isdir(logdir)):
os.mkdir(logdir)
logdir = os.path.join(logdir, "logs")
if not(os.path.isdir(logdir)):
os.mkdir(logdir)
# Create TF Logger.
self.tf_logger = TFLogger.Logger(logdir)
else:
self.tf_logger = TFLogger.Logger()
if self.args.data=='MIME':
self.visualizer = BaxterVisualizer()
# self.state_dim = 16
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
self.visualizer = SawyerVisualizer()
# self.state_dim = 8
elif self.args.data=='Mocap':
self.visualizer = MocapVisualizer(args=self.args)
else:
self.visualizer = ToyDataVisualizer()
self.rollout_gif_list = []
self.gt_gif_list = []
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
def write_and_close(self):
self.writer.export_scalars_to_json("./all_scalars.json")
self.writer.close()
def collect_inputs(self, i, get_latents=False):
if self.args.data=='DeterGoal':
sample_traj, sample_action_seq = self.dataset[i]
latent_b_seq, latent_z_seq = self.dataset.get_latent_variables(i)
start = 0
if self.args.traj_length>0:
sample_action_seq = sample_action_seq[start:self.args.traj_length-1]
latent_b_seq = latent_b_seq[start:self.args.traj_length-1]
latent_z_seq = latent_z_seq[start:self.args.traj_length-1]
sample_traj = sample_traj[start:self.args.traj_length]
else:
# Traj length is going to be -1 here.
# Don't need to modify the action sequence, because it already has to be one step shorter than traj_length.
sample_action_seq = sample_action_seq[start:]
sample_traj = sample_traj[start:]
latent_b_seq = latent_b_seq[start:]
latent_z_seq = latent_z_seq[start:]
# The trajectory is going to be one step longer than the action sequence, because action sequences are constructed from state differences. Instead, truncate trajectory to length of action sequence.
# Now manage concatenated trajectory differently - {{s0,_},{s1,a0},{s2,a1},...,{sn,an-1}}.
concatenated_traj = self.concat_state_action(sample_traj, sample_action_seq)
old_concatenated_traj = self.old_concat_state_action(sample_traj, sample_action_seq)
if self.args.data=='DeterGoal':
self.conditional_information = np.zeros((self.args.condition_size))
self.conditional_information[self.dataset.get_goal(i)] = 1
self.conditional_information[4:] = self.dataset.get_goal_position(i)
else:
self.conditional_information = np.zeros((self.args.condition_size))
if get_latents:
return sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj, latent_b_seq, latent_z_seq
else:
return sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj
elif self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
# If we're imitating... select demonstrations from the particular task.
if self.args.setting=='imitation' and self.args.data=='Roboturk':
data_element = self.dataset.get_task_demo(self.demo_task_index, i)
else:
data_element = self.dataset[i]
if not(data_element['is_valid']):
return None, None, None, None
trajectory = data_element['demo']
# If normalization is set to some value.
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
trajectory = (trajectory-self.norm_sub_value)/self.norm_denom_value
action_sequence = np.diff(trajectory,axis=0)
self.current_traj_len = len(trajectory)
if self.args.data=='MIME':
self.conditional_information = np.zeros((self.conditional_info_size))
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
robot_states = data_element['robot-state']
object_states = data_element['object-state']
self.conditional_information = np.zeros((self.conditional_info_size))
# Don't set this if pretraining / baseline.
if self.args.setting=='learntsub' or self.args.setting=='imitation':
self.conditional_information = np.zeros((len(trajectory),self.conditional_info_size))
self.conditional_information[:,:self.cond_robot_state_size] = robot_states
# Doing this instead of self.cond_robot_state_size: because the object_states size varies across demonstrations.
self.conditional_information[:,self.cond_robot_state_size:self.cond_robot_state_size+object_states.shape[-1]] = object_states
# Setting task ID too.
self.conditional_information[:,-self.number_tasks+data_element['task-id']] = 1.
# Concatenate
concatenated_traj = self.concat_state_action(trajectory, action_sequence)
old_concatenated_traj = self.old_concat_state_action(trajectory, action_sequence)
if self.args.setting=='imitation':
action_sequence = RLUtils.resample(data_element['demonstrated_actions'],len(trajectory))
concatenated_traj = np.concatenate([trajectory, action_sequence],axis=1)
return trajectory, action_sequence, concatenated_traj, old_concatenated_traj
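# Comment-only illustration of the concatenation convention noted above: for states s0..sn and actions
# a0..a(n-1) (computed as state differences), the concatenated trajectory pairs each state with the
# action that led to it, i.e. [[s0, placeholder], [s1, a0], [s2, a1], ..., [sn, a(n-1)]], with a
# placeholder action for the first row.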
def train(self, model=None):
if model:
print("Loading model in training.")
self.load_all_models(model)
counter = self.args.initial_counter_value
# For number of training epochs.
for e in range(self.number_epochs):
self.current_epoch_running = e
print("Starting Epoch: ",e)
if e%self.args.save_freq==0:
self.save_all_models("epoch{0}".format(e))
np.random.shuffle(self.index_list)
if self.args.debug:
print("Embedding in Outer Train Function.")
embed()
# For every item in the epoch:
if self.args.setting=='imitation':
extent = self.dataset.get_number_task_demos(self.demo_task_index)
if self.args.setting=='transfer' or self.args.setting=='cycle_transfer':
extent = self.extent
else:
extent = len(self.dataset)-self.test_set_size
for i in range(extent):
print("Epoch: ",e," Trajectory:",i, "Datapoint: ", self.index_list[i])
self.run_iteration(counter, self.index_list[i])
counter = counter+1
if e%self.args.eval_freq==0:
self.automatic_evaluation(e)
self.write_and_close()
def automatic_evaluation(self, e):
# Writing new automatic evaluation that parses arguments and creates an identical command loading the appropriate model.
# Note: If the initial command loads a model, ignore that.
command_args = self.args._get_kwargs()
base_command = 'python Master.py --train=0 --model={0}'.format("Experiment_Logs/{0}/saved_models/Model_epoch{1}".format(self.args.name, e))
if self.args.data=='Mocap':
base_command = './xvfb-run-safe ' + base_command
# For every argument in the command arguments, add it to the base command with the value used, unless it's train or model.
for ar in command_args:
# Skip model and train, because we need to set these manually.
if ar[0]=='model' or ar[0]=='train':
pass
# Add the rest
else:
base_command = base_command + ' --{0}={1}'.format(ar[0],ar[1])
cluster_command = 'python cluster_run.py --partition=learnfair --name={0}_Eval --cmd=\'{1}\''.format(self.args.name, base_command)
subprocess.call([cluster_command],shell=True)
def visualize_robot_data(self):
self.N = 100
self.rollout_timesteps = self.args.traj_length
if self.args.data=='MIME':
self.visualizer = BaxterVisualizer()
# self.state_dim = 16
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
self.visualizer = SawyerVisualizer()
# self.state_dim = 8
elif self.args.data=='Mocap':
self.visualizer = MocapVisualizer(args=self.args)
# Because there are just more invalid DP's in Mocap.
self.N = 100
else:
self.visualizer = ToyDataVisualizer()
self.latent_z_set = np.zeros((self.N,self.latent_z_dimensionality))
# These are lists because they're variable length individually.
self.indices = []
self.trajectory_set = []
self.trajectory_rollout_set = []
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.rollout_gif_list = []
self.gt_gif_list = []
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
self.max_len = 0
for i in range(self.N):
print("#########################################")
print("Getting visuals for trajectory: ",i)
latent_z, sample_traj, sample_action_seq = self.run_iteration(0, i, return_z=True)
if latent_z is not None:
self.indices.append(i)
if len(sample_traj)>self.max_len:
self.max_len = len(sample_traj)
self.latent_z_set[i] = copy.deepcopy(latent_z.detach().cpu().numpy())
trajectory_rollout = self.get_robot_visuals(i, latent_z, sample_traj)
# self.trajectory_set[i] = copy.deepcopy(sample_traj)
# self.trajectory_rollout_set[i] = copy.deepcopy(trajectory_rollout)
self.trajectory_set.append(copy.deepcopy(sample_traj))
self.trajectory_rollout_set.append(copy.deepcopy(trajectory_rollout))
# Get MIME embedding for rollout and GT trajectories, with same Z embedding.
embedded_z = self.get_robot_embedding()
gt_animation_object = self.visualize_robot_embedding(embedded_z, gt=True)
rollout_animation_object = self.visualize_robot_embedding(embedded_z, gt=False)
self.write_embedding_HTML(gt_animation_object,prefix="GT")
self.write_embedding_HTML(rollout_animation_object,prefix="Rollout")
# Save webpage.
self.write_results_HTML()
def rollout_robot_trajectory(self, trajectory_start, latent_z, rollout_length=None):
subpolicy_inputs = torch.zeros((1,2*self.state_dim+self.latent_z_dimensionality)).to(device).float()
subpolicy_inputs[0,:self.state_dim] = torch.tensor(trajectory_start).to(device).float()
subpolicy_inputs[:,2*self.state_dim:] = torch.tensor(latent_z).to(device).float()
if rollout_length is not None:
length = rollout_length-1
else:
length = self.rollout_timesteps-1
for t in range(length):
actions = self.policy_network.get_actions(subpolicy_inputs, greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
# Downscale the actions by action_scale_factor.
action_to_execute = action_to_execute/self.args.action_scale_factor
# Compute next state.
new_state = subpolicy_inputs[t,:self.state_dim]+action_to_execute
# New input row.
input_row = torch.zeros((1,2*self.state_dim+self.latent_z_dimensionality)).to(device).float()
input_row[0,:self.state_dim] = new_state
# Feed in the ORIGINAL prediction from the network as input. Not the downscaled thing.
input_row[0,self.state_dim:2*self.state_dim] = actions[-1].squeeze(1)
input_row[0,2*self.state_dim:] = latent_z
subpolicy_inputs = torch.cat([subpolicy_inputs,input_row],dim=0)
trajectory = subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy()
return trajectory
def get_robot_visuals(self, i, latent_z, trajectory, return_image=False):
# 1) Feed Z into policy, rollout trajectory.
trajectory_rollout = self.rollout_robot_trajectory(trajectory[0], latent_z, rollout_length=trajectory.shape[0])
# 2) Unnormalize data.
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
unnorm_gt_trajectory = (trajectory*self.norm_denom_value)+self.norm_sub_value
unnorm_pred_trajectory = (trajectory_rollout*self.norm_denom_value) + self.norm_sub_value
else:
unnorm_gt_trajectory = trajectory
unnorm_pred_trajectory = trajectory_rollout
if self.args.data=='Mocap':
# Get animation object from dataset.
animation_object = self.dataset[i]['animation']
# 3) Run unnormalized ground truth trajectory in visualizer.
ground_truth_gif = self.visualizer.visualize_joint_trajectory(unnorm_gt_trajectory, gif_path=self.dir_name, gif_name="Traj_{0}_GT.gif".format(i), return_and_save=True)
# 4) Run unnormalized rollout trajectory in visualizer.
rollout_gif = self.visualizer.visualize_joint_trajectory(unnorm_pred_trajectory, gif_path=self.dir_name, gif_name="Traj_{0}_Rollout.gif".format(i), return_and_save=True)
self.gt_gif_list.append(copy.deepcopy(ground_truth_gif))
self.rollout_gif_list.append(copy.deepcopy(rollout_gif))
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
if return_image:
return unnorm_pred_trajectory, ground_truth_gif, rollout_gif
else:
return unnorm_pred_trajectory
else:
if return_image:
return trajectory_rollout, ground_truth_gif, rollout_gif
else:
return trajectory_rollout
def write_results_HTML(self):
# Retrieve, append, and print images from datapoints across different models.
print("Writing HTML File.")
# Open Results HTML file.
with open(os.path.join(self.dir_name,'Results_{}.html'.format(self.args.name)),'w') as html_file:
# Start HTML doc.
html_file.write('<html>')
html_file.write('<body>')
html_file.write('<p> Model: {0}</p>'.format(self.args.name))
html_file.write('<p> Average Trajectory Distance: {0}</p>'.format(self.mean_distance))
for i in range(self.N):
if i%100==0:
print("Datapoint:",i)
html_file.write('<p> <b> Trajectory {} </b></p>'.format(i))
file_prefix = self.dir_name
# Create gif_list by prefixing base_gif_list with file prefix.
html_file.write('<div style="display: flex; justify-content: row;"> <img src="Traj_{0}_GT.gif"/> <img src="Traj_{0}_Rollout.gif"/> </div>'.format(i))
# Add gap space.
html_file.write('<p> </p>')
html_file.write('</body>')
html_file.write('</html>')
def write_embedding_HTML(self, animation_object, prefix=""):
print("Writing Embedding File.")
t1 = time.time()
# Open Results HTML file.
with open(os.path.join(self.dir_name,'Embedding_{0}_{1}.html'.format(prefix,self.args.name)),'w') as html_file:
# Start HTML doc.
html_file.write('<html>')
html_file.write('<body>')
html_file.write('<p> Model: {0}</p>'.format(self.args.name))
html_file.write(animation_object.to_html5_video())
# print(animation_object.to_html5_video(), file=html_file)
html_file.write('</body>')
html_file.write('</html>')
animation_object.save(os.path.join(self.dir_name,'{0}_Embedding_Video.mp4'.format(self.args.name)))
# animation_object.save(os.path.join(self.dir_name,'{0}_Embedding_Video.mp4'.format(self.args.name)), writer='imagemagick')
t2 = time.time()
print("Time taken to write this embedding: ",t2-t1)
def get_robot_embedding(self, return_tsne_object=False):
# Mean and variance normalize z.
mean = self.latent_z_set.mean(axis=0)
std = self.latent_z_set.std(axis=0)
normed_z = (self.latent_z_set-mean)/std
tsne = skl_manifold.TSNE(n_components=2,random_state=0,perplexity=self.args.perplexity)
embedded_zs = tsne.fit_transform(normed_z)
scale_factor = 1
scaled_embedded_zs = scale_factor*embedded_zs
if return_tsne_object:
return scaled_embedded_zs, tsne
else:
return scaled_embedded_zs
def visualize_robot_embedding(self, scaled_embedded_zs, gt=False):
# Create figure and axis objects.
matplotlib.rcParams['figure.figsize'] = [50, 50]
fig, ax = plt.subplots()
# number_samples = 400
number_samples = self.N
# Create a scatter plot of the embedding itself. The plot does not seem to work without this.
ax.scatter(scaled_embedded_zs[:number_samples,0],scaled_embedded_zs[:number_samples,1])
ax.axis('off')
ax.set_title("Embedding of Latent Representation of our Model",fontdict={'fontsize':40})
artists = []
# For the number of samples in the TSNE / Embedding, create an Image object for each of them.
for i in range(len(self.indices)):
if i%10==0:
print(i)
# Create offset image (so that we can place it where we choose), with specific zoom.
if gt:
imagebox = OffsetImage(self.gt_gif_list[i][0],zoom=0.4)
else:
imagebox = OffsetImage(self.rollout_gif_list[i][0],zoom=0.4)
# Create an annotation box to put the offset image into. specify offset image, position, and disable bounding frame.
ab = AnnotationBbox(imagebox, (scaled_embedded_zs[self.indices[i],0], scaled_embedded_zs[self.indices[i],1]), frameon=False)
# Add the annotation box artist to the list artists.
artists.append(ax.add_artist(ab))
def update(t):
# for i in range(number_samples):
for i in range(len(self.indices)):
if gt:
imagebox = OffsetImage(self.gt_gif_list[i][min(t, len(self.gt_gif_list[i])-1)],zoom=0.4)
else:
imagebox = OffsetImage(self.rollout_gif_list[i][min(t, len(self.rollout_gif_list[i])-1)],zoom=0.4)
ab = AnnotationBbox(imagebox, (scaled_embedded_zs[self.indices[i],0], scaled_embedded_zs[self.indices[i],1]), frameon=False)
artists.append(ax.add_artist(ab))
# update_len = 20
anim = FuncAnimation(fig, update, frames=np.arange(0, self.max_len), interval=200)
return anim
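# A minimal sketch (the perplexity value is an assumption; not wired into the class above) of the
# embedding step in get_robot_embedding: standardize the set of latent z's and project it to 2D with
# scikit-learn's TSNE.
def _example_embed_latents(latent_z_set, perplexity=30):
	from sklearn import manifold
	mean = latent_z_set.mean(axis=0)
	std = latent_z_set.std(axis=0)
	normed_z = (latent_z_set-mean)/std
	tsne = manifold.TSNE(n_components=2, random_state=0, perplexity=perplexity)
	# Returns an (N, 2) array of embedded latent z's.
	return tsne.fit_transform(normed_z)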
class PolicyManager_Pretrain(PolicyManager_BaseClass):
def __init__(self, number_policies=4, dataset=None, args=None):
if args.setting=='imitation':
super(PolicyManager_Pretrain, self).__init__(number_policies=number_policies, dataset=dataset, args=args)
else:
super(PolicyManager_Pretrain, self).__init__()
self.args = args
self.data = self.args.data
# Not used if discrete_z is false.
self.number_policies = number_policies
self.dataset = dataset
# Global input size: trajectory at every step - x,y,action
# Inputs is now states and actions.
# Model size parameters
# if self.args.data=='Continuous' or self.args.data=='ContinuousDir' or self.args.data=='ContinuousNonZero' or self.args.data=='ContinuousDirNZ' or self.args.data=='GoalDirected' or self.args.data=='Separable':
self.state_size = 2
self.state_dim = 2
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
# Number of actions
self.output_size = 2
self.latent_z_dimensionality = self.args.z_dimensions
self.number_layers = self.args.number_layers
self.traj_length = 5
self.number_epochs = self.args.epochs
self.test_set_size = 500
if self.args.data=='MIME':
self.state_size = 16
self.state_dim = 16
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.latent_z_dimensionality = self.args.z_dimensions
self.number_layers = self.args.number_layers
self.traj_length = self.args.traj_length
self.number_epochs = self.args.epochs
if self.args.normalization=='meanvar':
self.norm_sub_value = np.load("Statistics/MIME_Means.npy")
self.norm_denom_value = np.load("Statistics/MIME_Var.npy")
elif self.args.normalization=='minmax':
self.norm_sub_value = np.load("Statistics/MIME_Min.npy")
self.norm_denom_value = np.load("Statistics/MIME_Max.npy") - self.norm_sub_value
# Max of robot_state + object_state sizes across all Baxter environments.
self.cond_robot_state_size = 60
self.cond_object_state_size = 25
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
if self.args.gripper:
self.state_size = 8
self.state_dim = 8
else:
self.state_size = 7
self.state_dim = 7
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.number_layers = self.args.number_layers
self.traj_length = self.args.traj_length
if self.args.normalization=='meanvar':
self.norm_sub_value = np.load("Statistics/Roboturk_Mean.npy")
self.norm_denom_value = np.load("Statistics/Roboturk_Var.npy")
elif self.args.normalization=='minmax':
self.norm_sub_value = np.load("Statistics/Roboturk_Min.npy")
self.norm_denom_value = np.load("Statistics/Roboturk_Max.npy") - self.norm_sub_value
# Max of robot_state + object_state sizes across all sawyer environments.
# Robot size always 30. Max object state size is... 23.
self.cond_robot_state_size = 30
self.cond_object_state_size = 23
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size
elif self.args.data=='Mocap':
self.state_size = 22*3
self.state_dim = 22*3
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
self.conditional_info_size = 0
# Training parameters.
self.baseline_value = 0.
self.beta_decay = 0.9
self.learning_rate = self.args.learning_rate
self.initial_epsilon = self.args.epsilon_from
self.final_epsilon = self.args.epsilon_to
self.decay_epochs = self.args.epsilon_over
self.decay_counter = self.decay_epochs*len(self.dataset)
# Log-likelihood penalty.
self.lambda_likelihood_penalty = self.args.likelihood_penalty
self.baseline = None
# Per step decay.
self.decay_rate = (self.initial_epsilon-self.final_epsilon)/(self.decay_counter)
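# Comment-only example of this decay schedule: with epsilon_from=0.5, epsilon_to=0.05, epsilon_over=5
# epochs and a dataset of 1000 trajectories, decay_counter = 5000 and
# decay_rate = (0.5-0.05)/5000 = 9e-05, so epsilon falls linearly from 0.5 to 0.05 over the first
# 5000 training iterations and stays at 0.05 afterwards (see set_epoch below).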
def create_networks(self):
# Create K Policy Networks.
# This policy network automatically manages input size.
if self.args.discrete_z:
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.number_policies, self.number_layers).to(device)
else:
# self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.latent_z_dimensionality, self.number_layers).to(device)
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
# Create encoder.
if self.args.discrete_z:
# The latent space is just one of 4 z's. So make output of encoder a one hot vector.
self.encoder_network = EncoderNetwork(self.input_size, self.hidden_size, self.number_policies).to(device)
else:
# self.encoder_network = ContinuousEncoderNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality).to(device)
# if self.args.transformer:
# self.encoder_network = TransformerEncoder(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args).to(device)
# else:
self.encoder_network = ContinuousEncoderNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args).to(device)
def create_training_ops(self):
# self.negative_log_likelihood_loss_function = torch.nn.NLLLoss()
self.negative_log_likelihood_loss_function = torch.nn.NLLLoss(reduction='none')
self.KLDivergence_loss_function = torch.nn.KLDivLoss(reduction='none')
# Only need one object of the NLL loss function for the latent net.
# These are loss functions. You still instantiate the loss function every time you evaluate it on some particular data.
# When you instantiate it, you call backward on that instantiation. That's why you know what loss to optimize when computing gradients.
if self.args.train_only_policy:
self.parameter_list = self.policy_network.parameters()
else:
self.parameter_list = list(self.policy_network.parameters()) + list(self.encoder_network.parameters())
self.optimizer = torch.optim.Adam(self.parameter_list,lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Encoder_Network'] = self.encoder_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, only_policy=False, just_subpolicy=False):
load_object = torch.load(path)
if self.args.train_only_policy and self.args.train:
self.encoder_network.load_state_dict(load_object['Encoder_Network'])
else:
self.policy_network.load_state_dict(load_object['Policy_Network'])
if not(only_policy):
self.encoder_network.load_state_dict(load_object['Encoder_Network'])
def set_epoch(self, counter):
if self.args.train:
if counter<self.decay_counter:
self.epsilon = self.initial_epsilon-self.decay_rate*counter
else:
self.epsilon = self.final_epsilon
else:
self.epsilon = self.final_epsilon
def visualize_trajectory(self, traj, no_axes=False):
fig = plt.figure()
ax = fig.gca()
ax.scatter(traj[:,0],traj[:,1],c=range(len(traj)),cmap='jet')
plt.xlim(-10,10)
plt.ylim(-10,10)
if no_axes:
plt.axis('off')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
image = np.transpose(image, axes=[2,0,1])
return image
def update_plots(self, counter, loglikelihood, sample_traj):
self.tf_logger.scalar_summary('Subpolicy Likelihood', loglikelihood.mean(), counter)
self.tf_logger.scalar_summary('Total Loss', self.total_loss.mean(), counter)
self.tf_logger.scalar_summary('Encoder KL', self.encoder_KL.mean(), counter)
if not(self.args.reparam):
self.tf_logger.scalar_summary('Baseline', self.baseline.sum(), counter)
self.tf_logger.scalar_summary('Encoder Loss', self.encoder_loss.sum(), counter)
self.tf_logger.scalar_summary('Reinforce Encoder Loss', self.reinforce_encoder_loss.sum(), counter)
self.tf_logger.scalar_summary('Total Encoder Loss', self.total_encoder_loss.sum() ,counter)
if self.args.entropy:
self.tf_logger.scalar_summary('SubPolicy Entropy', torch.mean(subpolicy_entropy), counter)
if counter%self.args.display_freq==0:
self.tf_logger.image_summary("GT Trajectory",self.visualize_trajectory(sample_traj), counter)
def assemble_inputs(self, input_trajectory, latent_z_indices, latent_b, sample_action_seq):
if self.args.discrete_z:
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies+1)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size+latent_z_indices[:-1].long()] = 1.
assembled_inputs[range(1,len(input_trajectory)),-1] = latent_b[:-1].float()
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size+latent_z_indices.long()] = 1.
# subpolicy_inputs[range(len(input_trajectory)),-1] = latent_b.float()
# # Concatenated action sequence for policy network.
padded_action_seq = np.concatenate([sample_action_seq,np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
else:
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality+1)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size:-1] = latent_z_indices[:-1]
assembled_inputs[range(1,len(input_trajectory)),-1] = latent_b[:-1].float()
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size:] = latent_z_indices
# subpolicy_inputs[range(len(input_trajectory)),-1] = latent_b.float()
# # Concatenated action sequence for policy network's forward / logprobabilities function.
# padded_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
padded_action_seq = np.concatenate([sample_action_seq,np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
def concat_state_action(self, sample_traj, sample_action_seq):
# Add blank to start of action sequence and then concatenate.
sample_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# Currently returns:
# s0, s1, s2, s3, ..., sn-1, sn
# _ , a0, a1, a2, ..., an-1, an
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
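# Shape sketch: for a segment of n+1 states (each of size state_size) and n actions (each of size output_size),
# the padded action sequence also has n+1 rows, so the returned array is (n+1, state_size+output_size),
# i.e. (n+1, input_size) whenever output_size == state_size.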
def old_concat_state_action(self, sample_traj, sample_action_seq):
sample_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
def get_trajectory_segment(self, i):
if self.args.data=='Continuous' or self.args.data=='ContinuousDir' or self.args.data=='ContinuousNonZero' or self.args.data=='ContinuousDirNZ' or self.args.data=='GoalDirected' or self.args.data=='Separable':
# Sample trajectory segment from dataset.
sample_traj, sample_action_seq = self.dataset[i]
# Subsample trajectory segment.
start_timepoint = np.random.randint(0,self.args.traj_length-self.traj_length)
end_timepoint = start_timepoint + self.traj_length
# The trajectory is one step longer than the action sequence, because actions are constructed from state differences; here the action sequence is kept one step shorter than the trajectory.
sample_traj = sample_traj[start_timepoint:end_timepoint]
sample_action_seq = sample_action_seq[start_timepoint:end_timepoint-1]
self.current_traj_len = self.traj_length
# Now manage concatenated trajectory differently - {{s0,_},{s1,a0},{s2,a1},...,{sn,an-1}}.
concatenated_traj = self.concat_state_action(sample_traj, sample_action_seq)
return concatenated_traj, sample_action_seq, sample_traj
elif self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
data_element = self.dataset[i]
# If Invalid.
if not(data_element['is_valid']):
return None, None, None
# if self.args.data=='MIME':
# # Sample a trajectory length that's valid.
# trajectory = np.concatenate([data_element['la_trajectory'],data_element['ra_trajectory'],data_element['left_gripper'].reshape((-1,1)),data_element['right_gripper'].reshape((-1,1))],axis=-1)
# elif self.args.data=='Roboturk':
# trajectory = data_element['demo']
if self.args.gripper:
trajectory = data_element['demo']
else:
trajectory = data_element['demo'][:,:-1]
# If allowing variable skill length, set length for this sample.
if self.args.var_skill_length:
# Choose length of 12-16 with certain probabilities.
self.current_traj_len = np.random.choice([12,13,14,15,16],p=[0.1,0.2,0.4,0.2,0.1])
else:
self.current_traj_len = self.traj_length
# Sample random start point.
if trajectory.shape[0]>self.current_traj_len:
bias_length = int(self.args.pretrain_bias_sampling*trajectory.shape[0])
# Probability with which to sample biased segment:
sample_biased_segment = np.random.binomial(1,p=self.args.pretrain_bias_sampling_prob)
# If we want to bias sampling of trajectory segments towards the middle of the trajectory,
# to increase the proportion of segments performing motions other than reaching and returning.
# Sample a biased segment if trajectory length is sufficient, and based on probability of sampling.
if ((trajectory.shape[0]-2*bias_length)>self.current_traj_len) and sample_biased_segment:
start_timepoint = np.random.randint(bias_length, trajectory.shape[0] - self.current_traj_len - bias_length)
else:
start_timepoint = np.random.randint(0,trajectory.shape[0]-self.current_traj_len)
end_timepoint = start_timepoint + self.current_traj_len
# Get trajectory segment and actions.
trajectory = trajectory[start_timepoint:end_timepoint]
# If normalization is set to some value.
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
trajectory = (trajectory-self.norm_sub_value)/self.norm_denom_value
# CONDITIONAL INFORMATION for the encoder...
if self.args.data=='MIME' or self.args.data=='Mocap':
pass
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
# robot_states = data_element['robot-state'][start_timepoint:end_timepoint]
# object_states = data_element['object-state'][start_timepoint:end_timepoint]
pass
# self.conditional_information = np.zeros((len(trajectory),self.conditional_info_size))
# self.conditional_information[:,:self.cond_robot_state_size] = robot_states
# self.conditional_information[:,self.cond_robot_state_size:object_states.shape[-1]] = object_states
# conditional_info = np.concatenate([robot_states,object_states],axis=1)
else:
return None, None, None
action_sequence = np.diff(trajectory,axis=0)
# Concatenate
concatenated_traj = self.concat_state_action(trajectory, action_sequence)
# Scale this action sequence by the action scale factor:
scaled_action_sequence = self.args.action_scale_factor*action_sequence
return concatenated_traj, scaled_action_sequence, trajectory
def construct_dummy_latents(self, latent_z):
if self.args.discrete_z:
latent_z_indices = latent_z.float()*torch.ones((self.traj_length)).to(device).float()
else:
# This construction should work whether or not reparameterization is used.
latent_z_indices = torch.cat([latent_z.squeeze(0) for i in range(self.current_traj_len)],dim=0)
# Set latent_b to a dummy value: all zeros here (originally 00001, i.e. only the final step set to 1).
# latent_b = torch.ones((5)).to(device).float()
latent_b = torch.zeros((self.current_traj_len)).to(device).float()
# latent_b[-1] = 1.
return latent_z_indices, latent_b
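# In the continuous case this stacks current_traj_len copies of the single encoded z (one per timestep),
# and latent_b is an all-zero placeholder of length current_traj_len; no real segmentation is used here.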
# return latent_z_indices
def update_policies_reparam(self, loglikelihood, latent_z, encoder_KL):
self.optimizer.zero_grad()
# Losses computed as sums.
# self.likelihood_loss = -loglikelihood.sum()
# self.encoder_KL = encoder_KL.sum()
# Instead of summing losses, take the mean, so that variable trajectory lengths do not change the loss scale.
# We should also consider training with a randomly sampled number of timesteps.
self.likelihood_loss = -loglikelihood.mean()
self.encoder_KL = encoder_KL.mean()
self.total_loss = (self.likelihood_loss + self.args.kl_weight*self.encoder_KL)
if self.args.debug:
print("Embedding in Update subpolicies.")
embed()
self.total_loss.backward()
self.optimizer.step()
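# In effect this step minimizes a reparameterized VAE-style objective over the segment:
#   total_loss = -mean(loglikelihood) + kl_weight * mean(encoder_KL),
# with gradients flowing into both the policy (decoder) and the encoder via self.optimizer.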
def rollout_visuals(self, i, latent_z=None, return_traj=False):
# Initialize states and latent_z, etc.
# For t in range(number timesteps):
# # Retrieve action by feeding input to policy.
# # Step in environment with action.
# # Update inputs with new state and previously executed action.
self.state_dim = 2
self.rollout_timesteps = 5
start_state = torch.zeros((self.state_dim))
if self.args.discrete_z:
# Assuming 4 discrete subpolicies, just set subpolicy input to 1 at the latent_z index == i.
subpolicy_inputs = torch.zeros((1,self.input_size+self.number_policies)).to(device).float()
subpolicy_inputs[0,self.input_size+i] = 1.
else:
subpolicy_inputs = torch.zeros((1,self.input_size+self.latent_z_dimensionality)).to(device)
subpolicy_inputs[0,self.input_size:] = latent_z
subpolicy_inputs[0,:self.state_dim] = start_state
# subpolicy_inputs[0,-1] = 1.
for t in range(self.rollout_timesteps-1):
actions = self.policy_network.get_actions(subpolicy_inputs,greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
# Downscale the actions by action_scale_factor.
action_to_execute = action_to_execute/self.args.action_scale_factor
# Compute next state.
new_state = subpolicy_inputs[t,:self.state_dim]+action_to_execute
# New input row:
if self.args.discrete_z:
input_row = torch.zeros((1,self.input_size+self.number_policies)).to(device).float()
input_row[0,self.input_size+i] = 1.
else:
input_row = torch.zeros((1,self.input_size+self.latent_z_dimensionality)).to(device).float()
input_row[0,self.input_size:] = latent_z
input_row[0,:self.state_dim] = new_state
input_row[0,self.state_dim:2*self.state_dim] = action_to_execute
# input_row[0,-1] = 1.
subpolicy_inputs = torch.cat([subpolicy_inputs,input_row],dim=0)
# print("latent_z:",latent_z)
trajectory_rollout = subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy()
# print("Trajectory:",trajectory_rollout)
if return_traj:
return trajectory_rollout
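# Note: this rollout hard-codes the 2D toy setting (state_dim=2, 5 timesteps) and greedily integrates the
# subpolicy's actions from a zero start state, so it appears intended only for the Continuous* datasets.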
def run_iteration(self, counter, i, return_z=False, and_train=True):
# Basic Training Algorithm:
# For E epochs:
# # For all trajectories:
# # Sample trajectory segment from dataset.
# # Encode trajectory segment into latent z.
# # Feed latent z and trajectory segment into policy network and evaluate likelihood.
# # Update parameters.
self.set_epoch(counter)
############# (0) #############
# Sample trajectory segment from dataset.
if self.args.traj_segments:
trajectory_segment, sample_action_seq, sample_traj = self.get_trajectory_segment(i)
else:
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
# Calling it trajectory segment, but it's not actually a trajectory segment here.
trajectory_segment = concatenated_traj
if trajectory_segment is not None:
############# (1) #############
torch_traj_seg = torch.tensor(trajectory_segment).to(device).float()
# Encode trajectory segment into latent z.
latent_z, encoder_loglikelihood, encoder_entropy, kl_divergence = self.encoder_network.forward(torch_traj_seg, self.epsilon)
########## (2) & (3) ##########
# Feed latent z and trajectory segment into policy network and evaluate likelihood.
latent_z_seq, latent_b = self.construct_dummy_latents(latent_z)
_, subpolicy_inputs, sample_action_seq = self.assemble_inputs(trajectory_segment, latent_z_seq, latent_b, sample_action_seq)
# Policy net doesn't use the decayed epsilon. (Because we never sample from it in training, only in rollouts.)
loglikelihoods, _ = self.policy_network.forward(subpolicy_inputs, sample_action_seq)
loglikelihood = loglikelihoods[:-1].mean()
if self.args.debug:
print("Embedding in Train.")
embed()
############# (3) #############
# Update parameters.
if self.args.train and and_train:
# If we are regularizing:
# (1) Sample another z.
# (2) Construct inputs and such.
# (3) Compute distances, and feed to update_policies.
regularization_kl = None
z_distance = None
self.update_policies_reparam(loglikelihood, subpolicy_inputs, kl_divergence)
# Update Plots.
self.update_plots(counter, loglikelihood, trajectory_segment)
if return_z:
return latent_z, sample_traj, sample_action_seq
else:
if return_z:
return latent_z, sample_traj, sample_action_seq
else:
np.set_printoptions(suppress=True,precision=2)
print("###################", i)
print("Policy loglikelihood:", loglikelihood)
print("#########################################")
else:
return None, None, None
def evaluate_metrics(self):
self.distances = -np.ones((self.test_set_size))
# Get test set elements as last (self.test_set_size) number of elements of dataset.
for i in range(self.test_set_size):
index = i + len(self.dataset)-self.test_set_size
print("Evaluating ", i, " in test set, or ", index, " in dataset.")
# Get latent z.
latent_z, sample_traj, sample_action_seq = self.run_iteration(0, index, return_z=True)
if sample_traj is not None:
# Feed latent z to the rollout.
# rollout_trajectory = self.rollout_visuals(index, latent_z=latent_z, return_traj=True)
rollout_trajectory = self.rollout_robot_trajectory(sample_traj[0], latent_z, rollout_length=len(sample_traj))
self.distances[i] = ((sample_traj-rollout_trajectory)**2).mean()
self.mean_distance = self.distances[self.distances>0].mean()
def evaluate(self, model=None, suffix=None):
if model:
self.load_all_models(model)
np.set_printoptions(suppress=True,precision=2)
if self.args.data=='ContinuousNonZero':
self.visualize_embedding_space(suffix=suffix)
if self.args.data=="MIME" or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
print("Running Evaluation of State Distances on small test set.")
# self.evaluate_metrics()
# Only running viz if we're actually pretraining.
if self.args.traj_segments:
print("Running Visualization on Robot Data.")
self.visualize_robot_data()
else:
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
# np.save(os.path.join(self.dir_name,"Trajectory_Distances_{0}.npy".format(self.args.name)),self.distances)
# np.save(os.path.join(self.dir_name,"Mean_Trajectory_Distance_{0}.npy".format(self.args.name)),self.mean_distance)
# @profile
def get_trajectory_and_latent_sets(self):
# For N number of random trajectories from MIME:
# # Encode trajectory using encoder into latent_z.
# # Feed latent_z into subpolicy.
# # Rollout subpolicy for t timesteps.
# # Plot rollout.
# Embed plots.
# Set N:
self.N = 100
self.rollout_timesteps = 5
self.state_dim = 2
self.latent_z_set = np.zeros((self.N,self.latent_z_dimensionality))
self.trajectory_set = np.zeros((self.N, self.rollout_timesteps, self.state_dim))
# Use the dataset to get reasonable trajectories (without the information bottleneck / KL against N(0,1), we cannot just sample z's randomly).
for i in range(self.N):
# (1) Encode trajectory.
latent_z, _, _ = self.run_iteration(0, i, return_z=True, and_train=False)
# Copy z.
self.latent_z_set[i] = copy.deepcopy(latent_z.detach().cpu().numpy())
# (2) Now rollout policy.
self.trajectory_set[i] = self.rollout_visuals(i, latent_z=latent_z, return_traj=True)
# # (3) Plot trajectory.
# traj_image = self.visualize_trajectory(rollout_traj)
def visualize_embedding_space(self, suffix=None):
self.get_trajectory_and_latent_sets()
# TSNE on latentz's.
tsne = skl_manifold.TSNE(n_components=2,random_state=0)
embedded_zs = tsne.fit_transform(self.latent_z_set)
ratio = 0.3
for i in range(self.N):
plt.scatter(embedded_zs[i,0]+ratio*self.trajectory_set[i,:,0],embedded_zs[i,1]+ratio*self.trajectory_set[i,:,1],c=range(self.rollout_timesteps),cmap='jet')
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
if suffix is not None:
self.dir_name = os.path.join(self.dir_name, suffix)
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
# Format with name.
plt.savefig("{0}/Embedding_Joint_{1}.png".format(self.dir_name,self.args.name))
plt.close()
class PolicyManager_Joint(PolicyManager_BaseClass):
# Basic Training Algorithm:
# For E epochs:
# # For all trajectories:
# # Sample latent variables from conditional.
# # (Concatenate Latent Variables into Input.)
# # Evaluate log likelihoods of actions and options.
# # Update parameters.
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_Joint, self).__init__()
self.args = args
self.data = self.args.data
self.number_policies = number_policies
self.latent_z_dimensionality = self.args.z_dimensions
self.dataset = dataset
# Global input size: trajectory at every step - x,y,action
# Inputs is now states and actions.
# Model size parameters
self.state_size = 2
self.state_dim = 2
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
# Number of actions
self.output_size = 2
self.number_layers = self.args.number_layers
self.traj_length = 5
self.conditional_info_size = 6
if self.args.data=='MIME':
self.state_size = 16
self.state_dim = 16
self.input_size = 2*self.state_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
# Create Baxter visualizer for MIME data
# self.visualizer = BaxterVisualizer.MujocoVisualizer()
self.visualizer = BaxterVisualizer()
if self.args.normalization=='meanvar':
self.norm_sub_value = np.load("Statistics/MIME_Means.npy")
self.norm_denom_value = np.load("Statistics/MIME_Var.npy")
elif self.args.normalization=='minmax':
self.norm_sub_value = np.load("Statistics/MIME_Min.npy")
self.norm_denom_value = np.load("Statistics/MIME_Max.npy") - np.load("Statistics/MIME_Min.npy")
# Max of robot_state + object_state sizes across all Baxter environments.
self.cond_robot_state_size = 60
self.cond_object_state_size = 25
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size
self.conditional_viz_env = False
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
self.state_size = 8
self.state_dim = 8
self.input_size = 2*self.state_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
self.visualizer = SawyerVisualizer()
# Max of robot_state + object_state sizes across all Sawyer environments.
# Robot state size is always 30. Max object state size is 23.
self.cond_robot_state_size = 30
self.cond_object_state_size = 23
self.number_tasks = 8
self.conditional_info_size = self.cond_robot_state_size+self.cond_object_state_size+self.number_tasks
self.conditional_viz_env = True
elif self.args.data=='Mocap':
self.state_size = 22*3
self.state_dim = 22*3
self.input_size = 2*self.state_size
self.hidden_size = self.args.hidden_size
self.output_size = self.state_size
self.traj_length = self.args.traj_length
self.conditional_info_size = 0
self.conditional_information = None
self.conditional_viz_env = False
# Create visualizer object
self.visualizer = MocapVisualizer(args=self.args)
self.training_phase_size = self.args.training_phase_size
self.number_epochs = self.args.epochs
self.test_set_size = 500
self.baseline_value = 0.
self.beta_decay = 0.9
self.learning_rate = self.args.learning_rate
self.latent_b_loss_weight = self.args.lat_b_wt
self.latent_z_loss_weight = self.args.lat_z_wt
self.initial_epsilon = self.args.epsilon_from
self.final_epsilon = self.args.epsilon_to
self.decay_epochs = self.args.epsilon_over
self.decay_counter = self.decay_epochs*len(self.dataset)
# Log-likelihood penalty.
self.lambda_likelihood_penalty = self.args.likelihood_penalty
self.baseline = None
# Per step decay.
self.decay_rate = (self.initial_epsilon-self.final_epsilon)/(self.decay_counter)
def create_networks(self):
if self.args.discrete_z:
# Create K Policy Networks.
# This policy network automatically manages input size.
# self.policy_network = ContinuousPolicyNetwork(self.input_size,self.hidden_size,self.output_size,self.number_policies, self.number_layers).to(device)
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
# Create latent policy, whose action space = self.number_policies.
# This policy network automatically manages input size.
# Also add conditional_info_size to this.
self.latent_policy = LatentPolicyNetwork(self.input_size, self.hidden_size, self.number_policies, self.number_layers, self.args.b_exploration_bias).to(device)
# Create variational network.
# self.variational_policy = VariationalPolicyNetwork(self.input_size, self.hidden_size, self.number_policies, number_layers=self.number_layers, z_exploration_bias=self.args.z_exploration_bias, b_exploration_bias=self.args.b_exploration_bias).to(device)
self.variational_policy = VariationalPolicyNetwork(self.input_size, self.hidden_size, self.number_policies, self.args, number_layers=self.number_layers).to(device)
else:
# self.policy_network = ContinuousPolicyNetwork(self.input_size,self.hidden_size,self.output_size,self.latent_z_dimensionality, self.number_layers).to(device)
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
if self.args.constrained_b_prior:
self.latent_policy = ContinuousLatentPolicyNetwork_ConstrainedBPrior(self.input_size+self.conditional_info_size, self.hidden_size, self.args, self.number_layers).to(device)
self.variational_policy = ContinuousVariationalPolicyNetwork_ConstrainedBPrior(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args, number_layers=self.number_layers).to(device)
else:
# self.latent_policy = ContinuousLatentPolicyNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.number_layers, self.args.b_exploration_bias).to(device)
self.latent_policy = ContinuousLatentPolicyNetwork(self.input_size+self.conditional_info_size, self.hidden_size, self.args, self.number_layers).to(device)
self.variational_policy = ContinuousVariationalPolicyNetwork_BPrior(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args, number_layers=self.number_layers).to(device)
def create_training_ops(self):
self.negative_log_likelihood_loss_function = torch.nn.NLLLoss(reduction='none')
# If we are using reparameterization, use a global optimizer, and a global loss function.
# This means gradients are being handled properly.
parameter_list = list(self.latent_policy.parameters()) + list(self.variational_policy.parameters())
if not(self.args.fix_subpolicy):
parameter_list = parameter_list + list(self.policy_network.parameters())
self.optimizer = torch.optim.Adam(parameter_list, lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Latent_Policy'] = self.latent_policy.state_dict()
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Variational_Policy'] = self.variational_policy.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, just_subpolicy=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
if not(just_subpolicy):
if self.args.load_latent:
self.latent_policy.load_state_dict(load_object['Latent_Policy'])
self.variational_policy.load_state_dict(load_object['Variational_Policy'])
def set_epoch(self, counter):
if self.args.train:
if counter<self.decay_counter:
self.epsilon = self.initial_epsilon-self.decay_rate*counter
else:
self.epsilon = self.final_epsilon
if counter<self.training_phase_size:
self.training_phase=1
elif self.training_phase_size<=counter and counter<2*self.training_phase_size:
self.training_phase=2
print("In Phase 2.")
else:
self.training_phase=3
self.latent_z_loss_weight = 0.01*self.args.lat_b_wt
# In training phase 3 (counter >= 2*training_phase_size), the latent_b loss weight is left unchanged
# and the latent_z loss weight is scaled down to 0.01*lat_b_wt.
# Once counter exceeds 3*training_phase_size ("Phase 4" below), the latent_z loss weight is raised to 0.1*lat_b_wt.
if counter>3*self.training_phase_size:
# Set equal after 3.
print("In Phase 4.")
self.latent_z_loss_weight = 0.1*self.args.lat_b_wt
else:
print("In Phase 3.")
else:
self.epsilon = 0.
self.training_phase=1
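# Phase schedule while training: phase 1 for counter < training_phase_size, phase 2 up to
# 2*training_phase_size, phase 3 beyond that; after 3*training_phase_size ("Phase 4") only the
# latent_z loss weight changes (0.01*lat_b_wt -> 0.1*lat_b_wt). At eval time epsilon is 0 and the phase is 1.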
def visualize_trajectory(self, trajectory, segmentations=None, i=0, suffix='_Img'):
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
if self.args.normalization=='meanvar' or self.args.normalization=='minmax':
unnorm_trajectory = (trajectory*self.norm_denom_value)+self.norm_sub_value
else:
unnorm_trajectory = trajectory
if self.args.data=='Mocap':
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
if self.args.model is not None:
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
else:
model_epoch = self.current_epoch_running
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
animation_object = self.dataset[i]['animation']
return self.visualizer.visualize_joint_trajectory(unnorm_trajectory, gif_path=self.dir_name, gif_name="Traj_{0}_{1}.gif".format(i,suffix), return_and_save=True, additional_info=animation_object)
else:
return self.visualizer.visualize_joint_trajectory(unnorm_trajectory, return_gif=True, segmentations=segmentations)
else:
return self.visualize_2D_trajectory(trajectory)
def visualize_2D_trajectory(self, traj):
fig = plt.figure()
ax = fig.gca()
ax.scatter(traj[:,0],traj[:,1],c=range(len(traj)),cmap='jet')
scale = 30
plt.xlim(-scale,scale)
plt.ylim(-scale,scale)
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
# Got the image data; now close the figure so it doesn't leak.
# fig.gcf()
plt.close()
image = np.transpose(image, axes=[2,0,1])
return image
def compute_evaluation_metrics(self, sample_traj, counter, i):
# # Generate trajectory rollouts so we can calculate distance metric.
# self.rollout_visuals(counter, i, get_image=False)
# Compute trajectory distance between:
var_rollout_distance = ((self.variational_trajectory_rollout-sample_traj)**2).mean()
latent_rollout_distance = ((self.latent_trajectory_rollout-sample_traj)**2).mean()
return var_rollout_distance, latent_rollout_distance
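# Both metrics are mean squared errors between the ground-truth segment and the two rollouts produced by
# rollout_visuals: the variational rollout (z's inferred from the trajectory) and the latent rollout
# (z's selected by the latent policy itself).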
def update_plots(self, counter, i, subpolicy_loglikelihood, latent_loglikelihood, subpolicy_entropy, sample_traj, latent_z_logprobability, latent_b_logprobability, kl_divergence, prior_loglikelihood):
self.tf_logger.scalar_summary('Latent Policy Loss', torch.mean(self.total_latent_loss), counter)
self.tf_logger.scalar_summary('SubPolicy Log Likelihood', subpolicy_loglikelihood.mean(), counter)
self.tf_logger.scalar_summary('Latent Log Likelihood', latent_loglikelihood.mean(), counter)
self.tf_logger.scalar_summary('Variational Policy Loss', torch.mean(self.variational_loss), counter)
self.tf_logger.scalar_summary('Variational Reinforce Loss', torch.mean(self.reinforce_variational_loss), counter)
self.tf_logger.scalar_summary('Total Variational Policy Loss', torch.mean(self.total_variational_loss), counter)
self.tf_logger.scalar_summary('Baseline', self.baseline.mean(), counter)
self.tf_logger.scalar_summary('Total Likelihood', subpolicy_loglikelihood+latent_loglikelihood, counter)
self.tf_logger.scalar_summary('Epsilon', self.epsilon, counter)
self.tf_logger.scalar_summary('Latent Z LogProbability', latent_z_logprobability, counter)
self.tf_logger.scalar_summary('Latent B LogProbability', latent_b_logprobability, counter)
self.tf_logger.scalar_summary('KL Divergence', torch.mean(kl_divergence), counter)
self.tf_logger.scalar_summary('Prior LogLikelihood', torch.mean(prior_loglikelihood), counter)
if counter%self.args.display_freq==0:
# Now adding visuals for MIME as well, so this no longer depends on which dataset we use.
variational_rollout_image, latent_rollout_image = self.rollout_visuals(counter, i)
# Compute distance metrics.
var_dist, latent_dist = self.compute_evaluation_metrics(sample_traj, counter, i)
self.tf_logger.scalar_summary('Variational Trajectory Distance', var_dist, counter)
self.tf_logger.scalar_summary('Latent Trajectory Distance', latent_dist, counter)
gt_trajectory_image = np.array(self.visualize_trajectory(sample_traj, i=i, suffix='GT'))
variational_rollout_image = np.array(variational_rollout_image)
latent_rollout_image = np.array(latent_rollout_image)
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
# Feeding as list of image because gif_summary.
self.tf_logger.gif_summary("GT Trajectory",[gt_trajectory_image],counter)
self.tf_logger.gif_summary("Variational Rollout",[variational_rollout_image],counter)
self.tf_logger.gif_summary("Latent Rollout",[latent_rollout_image],counter)
else:
# Feeding as list of image because gif_summary.
self.tf_logger.image_summary("GT Trajectory",[gt_trajectory_image],counter)
self.tf_logger.image_summary("Variational Rollout",[variational_rollout_image],counter)
self.tf_logger.image_summary("Latent Rollout",[latent_rollout_image],counter)
def assemble_inputs(self, input_trajectory, latent_z_indices, latent_b, sample_action_seq, conditional_information=None):
if self.args.discrete_z:
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies+1)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size+latent_z_indices[:-1].long()] = 1.
assembled_inputs[range(1,len(input_trajectory)),-1] = latent_b[:-1].float()
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.number_policies)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size+latent_z_indices.long()] = 1.
# subpolicy_inputs[range(len(input_trajectory)),-1] = latent_b.float()
# # This method of concatenation is wrong, because it evaluates the likelihood of the dummy action [0,0] as well.
# # Concatenated action sequence for policy network.
# padded_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# This is the right method of concatenation, because it avoids evaluating the likelihood of the dummy [0,0] action.
padded_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
else:
if self.training_phase>1:
# Prevents gradients from being propagated through this.
latent_z_copy = torch.tensor(latent_z_indices).to(device)
else:
latent_z_copy = latent_z_indices
if conditional_information is None:
conditional_information = torch.zeros((self.conditional_info_size)).to(device).float()
# Append latent z indices to sample_traj data to feed as input to BOTH the latent policy network and the subpolicy network.
assembled_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality+1+self.conditional_info_size)).to(device)
assembled_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
assembled_inputs[range(1,len(input_trajectory)),self.input_size:self.input_size+self.latent_z_dimensionality] = latent_z_copy[:-1]
# Note: an earlier version wrote latent_b into the wrong dimension; the flag now lives at index input_size+latent_z_dimensionality.
assembled_inputs[range(1,len(input_trajectory)),self.input_size+self.latent_z_dimensionality] = latent_b[:-1].float()
# assembled_inputs[range(1,len(input_trajectory)),-self.conditional_info_size:] = torch.tensor(conditional_information).to(device).float()
# Instead of feeding conditional information only from the 1st timestep onwards, we set it from the very first timestep.
if self.conditional_info_size>0:
assembled_inputs[:,-self.conditional_info_size:] = torch.tensor(conditional_information).to(device).float()
# Now assemble inputs for subpolicy.
subpolicy_inputs = torch.zeros((len(input_trajectory),self.input_size+self.latent_z_dimensionality)).to(device)
subpolicy_inputs[:,:self.input_size] = torch.tensor(input_trajectory).view(len(input_trajectory),self.input_size).to(device).float()
subpolicy_inputs[range(len(input_trajectory)),self.input_size:] = latent_z_indices
# # This method of concatenation is wrong, because it evaluates the likelihood of the dummy action [0,0] as well.
# # Concatenated action sequence for policy network.
# padded_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# This is the right method of concatenation, because it avoids evaluating the likelihood of the dummy [0,0] action.
padded_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return assembled_inputs, subpolicy_inputs, padded_action_seq
def concat_state_action(self, sample_traj, sample_action_seq):
# Add blank to start of action sequence and then concatenate.
sample_action_seq = np.concatenate([np.zeros((1,self.output_size)),sample_action_seq],axis=0)
# Currently returns:
# s0, s1, s2, s3, ..., sn-1, sn
# _, a0, a1, a2, ..., an_1, an
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
def old_concat_state_action(self, sample_traj, sample_action_seq):
# Add blank to the END of action sequence and then concatenate.
sample_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
return np.concatenate([sample_traj, sample_action_seq],axis=-1)
def setup_eval_against_encoder(self):
# Creates a network, loads the network from pretraining model file.
self.encoder_network = ContinuousEncoderNetwork(self.input_size, self.hidden_size, self.latent_z_dimensionality, self.args).to(device)
load_object = torch.load(self.args.subpolicy_model)
self.encoder_network.load_state_dict(load_object['Encoder_Network'])
# Force encoder to use original variance for eval.
self.encoder_network.variance_factor = 1.
def evaluate_loglikelihoods(self, sample_traj, sample_action_seq, concatenated_traj, latent_z_indices, latent_b):
# Initialize both loglikelihoods to 0.
subpolicy_loglikelihood = 0.
latent_loglikelihood = 0.
# Need to assemble inputs first - returns a Torch CUDA Tensor.
# This doesn't need to take in actions, because we can evaluate for all actions then select.
assembled_inputs, subpolicy_inputs, padded_action_seq = self.assemble_inputs(concatenated_traj, latent_z_indices, latent_b, sample_action_seq, self.conditional_information)
###########################
# Compute learnt subpolicy loglikelihood.
###########################
learnt_subpolicy_loglikelihoods, entropy = self.policy_network.forward(subpolicy_inputs, padded_action_seq)
# Clip values. # Comment this out to remove clipping.
learnt_subpolicy_loglikelihoods = torch.clamp(learnt_subpolicy_loglikelihoods,min=self.args.subpolicy_clamp_value)
# Multiplying the likelihoods with the subpolicy ratio before summing.
learnt_subpolicy_loglikelihoods = self.args.subpolicy_ratio*learnt_subpolicy_loglikelihoods
# Summing until penultimate timestep.
# learnt_subpolicy_loglikelihood = learnt_subpolicy_loglikelihoods[:-1].sum()
# TAKING AVERAGE HERE AS WELL.
learnt_subpolicy_loglikelihood = learnt_subpolicy_loglikelihoods[:-1].mean()
###########################
# Compute Latent policy loglikelihood values.
###########################
# Whether to clone assembled_inputs based on the phase of training.
# In phase one it doesn't matter if we use the clone or not, because we never use latent policy loss.
# So just clone anyway.
# For now, ignore phase 3. This prevents gradients from going into the variational policy from the latent policy.
assembled_inputs_copy = assembled_inputs.clone().detach()
latent_z_copy = latent_z_indices.clone().detach()
# Consideration for later:
# if self.training_phase==3:
# Don't clone.
if self.args.discrete_z:
# Return discrete probabilities from latent policy network.
latent_z_logprobabilities, latent_b_logprobabilities, latent_b_probabilities, latent_z_probabilities = self.latent_policy.forward(assembled_inputs_copy)
# # Selects first option for variable = 1, second option for variable = 0.
# Use this to check if latent_z elements are equal:
diff_val = (1-(latent_z_indices==latent_z_indices.roll(1,0))[1:]).to(device).float()
# Roll diff_val by one step so it aligns with the current timestep; latent_b is always 1 at the first timestep, so whatever lands in diff_val[0] doesn't matter.
diff_val = diff_val.roll(1,0)
# Selects first option for variable = 1, second option for variable = 0.
latent_z_temporal_logprobabilities = torch.where(latent_b[:-1].byte(), latent_z_logprobabilities[range(len(sample_traj)-1),latent_z_indices[:-1].long()], -self.lambda_likelihood_penalty*diff_val)
latent_z_logprobability = latent_z_temporal_logprobabilities.mean()
else:
# If not discrete, we need to evaluate the probabilities of latent_z_indices under the latent policy.
latent_b_logprobabilities, latent_b_probabilities, latent_distributions = self.latent_policy.forward(assembled_inputs_copy, self.epsilon)
# Evaluate loglikelihood of latent z vectors under the latent policy's distributions.
latent_z_logprobabilities = latent_distributions.log_prob(latent_z_copy.unsqueeze(1))
# Multiply logprobabilities by the latent policy ratio.
latent_z_temporal_logprobabilities = latent_z_logprobabilities[:-1]*self.args.latentpolicy_ratio
latent_z_logprobability = latent_z_temporal_logprobabilities.mean()
latent_z_probabilities = None
# LATENT LOGLIKELIHOOD is defined as:
# = \sum_{t=1}^T \log p(\zeta_t | \tau_{1:t}, \zeta_{1:t-1})
# = \sum_{t=1}^T \log { \phi_t(b_t)} + \log { 1[b_t==1] \eta_t(h_t|s_{1:t}) + 1[b_t==0] 1[z_t==z_{t-1}] }
# Adding log probabilities of termination (of whether it terminated or not), till penultimate step.
latent_b_temporal_logprobabilities = latent_b_logprobabilities[range(len(sample_traj)-1),latent_b[:-1].long()]
latent_b_logprobability = latent_b_temporal_logprobabilities.mean()
latent_loglikelihood += latent_b_logprobability
latent_loglikelihood += latent_z_logprobability
# Don't clamp; just multiply by suitable ratios (the same lat_z_wt and lat_b_wt weights used in the losses).
latent_temporal_loglikelihoods = self.args.lat_b_wt*latent_b_temporal_logprobabilities + self.args.lat_z_wt*latent_z_temporal_logprobabilities.squeeze(1)
##################################################
#### Manage merging likelihoods for REINFORCE ####
##################################################
if self.training_phase==1:
temporal_loglikelihoods = learnt_subpolicy_loglikelihoods[:-1].squeeze(1)
elif self.training_phase==2 or self.training_phase==3:
# temporal_loglikelihoods = learnt_subpolicy_loglikelihoods[:-1].squeeze(1) + self.args.temporal_latentpolicy_ratio*latent_temporal_loglikelihoods
temporal_loglikelihoods = learnt_subpolicy_loglikelihoods[:-1].squeeze(1)
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in the Evaluate Likelihoods Function.")
embed()
return None, None, None, latent_loglikelihood, \
latent_b_logprobabilities, latent_z_logprobabilities, latent_b_probabilities, latent_z_probabilities, \
latent_z_logprobability, latent_b_logprobability, learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, temporal_loglikelihoods
def new_update_policies(self, i, sample_action_seq, subpolicy_loglikelihoods, subpolicy_entropy, latent_b, latent_z_indices,\
variational_z_logprobabilities, variational_b_logprobabilities, variational_z_probabilities, variational_b_probabilities, kl_divergence, \
latent_z_logprobabilities, latent_b_logprobabilities, latent_z_probabilities, latent_b_probabilities, \
learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, loglikelihood, prior_loglikelihood, latent_loglikelihood, temporal_loglikelihoods):
# Set optimizer gradients to zero.
self.optimizer.zero_grad()
# Assemble prior and KL divergence losses.
# These are output by the variational network, and we don't need the last z it predicts, so drop the final timestep.
prior_loglikelihood = prior_loglikelihood[:-1]
kl_divergence = kl_divergence[:-1]
######################################################
############## Update latent policy. #################
######################################################
# Remember, the NLL loss function takes <log-probabilities, target indices> as arguments.
self.latent_b_loss = self.negative_log_likelihood_loss_function(latent_b_logprobabilities, latent_b.long())
if self.args.discrete_z:
self.latent_z_loss = self.negative_log_likelihood_loss_function(latent_z_logprobabilities, latent_z_indices.long())
# If continuous latent_z, just calculate loss as negative log likelihood of the latent_z's selected by variational network.
else:
self.latent_z_loss = -latent_z_logprobabilities.squeeze(1)
# Compute total latent loss as weighted sum of latent_b_loss and latent_z_loss.
self.total_latent_loss = (self.latent_b_loss_weight*self.latent_b_loss+self.latent_z_loss_weight*self.latent_z_loss)[:-1]
#######################################################
############# Compute Variational Losses ##############
#######################################################
# MUST ALWAYS COMPUTE: # Compute cross entropies.
self.variational_b_loss = self.negative_log_likelihood_loss_function(variational_b_logprobabilities[:-1], latent_b[:-1].long())
# In case of reparameterization, the variational loss that goes to REINFORCE should just be variational_b_loss.
self.variational_loss = self.args.var_loss_weight*self.variational_b_loss
#######################################################
########## Compute Variational Reinforce Loss #########
#######################################################
# Compute reinforce target based on how we express the objective:
# The original implementation, i.e. the entropic implementation, uses:
# (1) \mathbb{E}_{x, z \sim q(z|x)} \Big[ \nabla_{\omega} \log q(z|x,\omega) \{ \log p(x|z) + \log p(z|x) - \log q(z|x) - 1 \} \Big]
# The KL divergence implementation uses:
# (2) \mathbb{E}_{x, z \sim q(z|x)} \Big[ \nabla_{\omega} \log q(z|x,\omega) \{ \log p(x|z) + \log p(z|x) - \log p(z) \} \Big] - \nabla_{\omega} D_{KL} \Big[ q(z|x) || p(z) \Big]
# Compute baseline target according to NEW GRADIENT, and Equation (2) above.
baseline_target = (temporal_loglikelihoods - self.args.prior_weight*prior_loglikelihood).clone().detach()
if self.baseline is None:
self.baseline = torch.zeros_like(baseline_target.mean()).to(device).float()
else:
self.baseline = (self.beta_decay*self.baseline)+(1.-self.beta_decay)*baseline_target.mean()
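# The baseline is an exponential moving average of the REINFORCE target:
#   baseline <- beta_decay*baseline + (1 - beta_decay)*mean(target), with beta_decay = 0.9 from __init__,
# which reduces the variance of the b-level REINFORCE gradient below.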
self.reinforce_variational_loss = self.variational_loss*(baseline_target-self.baseline)
# If reparam, the variational loss is a combination of three things.
# Losses from latent policy and subpolicy into variational network for the latent_z's, the reinforce loss on the latent_b's, and the KL divergence.
# But since we don't need to additionally compute the gradients from the latent and subpolicy networks into the variational network, just set the variational loss to reinforce + KL.
# self.total_variational_loss = (self.reinforce_variational_loss.sum() + self.args.kl_weight*kl_divergence.squeeze(1).sum()).sum()
self.total_variational_loss = (self.reinforce_variational_loss + self.args.kl_weight*kl_divergence.squeeze(1)).mean()
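# Spelled out, this is mean( var_loss_weight*variational_b_loss*(target - baseline) + kl_weight*KL ):
# only the discrete b decisions go through REINFORCE, while the continuous z path is handled by the
# reparameterization trick and the KL term.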
######################################################
# Set other losses, subpolicy, latent, and prior.
######################################################
# Get subpolicy losses.
self.subpolicy_loss = (-learnt_subpolicy_loglikelihood).mean()
# Get prior losses.
self.prior_loss = (-self.args.prior_weight*prior_loglikelihood).mean()
# Reweight latent loss.
self.total_weighted_latent_loss = (self.args.latent_loss_weight*self.total_latent_loss).mean()
################################################
# Setting total loss based on phase of training.
################################################
# IF PHASE ONE:
if self.training_phase==1:
self.total_loss = self.subpolicy_loss + self.total_variational_loss + self.prior_loss
# IF DONE WITH PHASE ONE:
elif self.training_phase==2 or self.training_phase==3:
self.total_loss = self.subpolicy_loss + self.total_weighted_latent_loss + self.total_variational_loss + self.prior_loss
################################################
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in Update Policies")
embed()
################################################
self.total_loss.sum().backward()
self.optimizer.step()
def set_env_conditional_info(self):
obs = self.environment._get_observation()
self.conditional_information = np.zeros((self.conditional_info_size))
cond_state = np.concatenate([obs['robot-state'],obs['object-state']])
self.conditional_information[:cond_state.shape[-1]] = cond_state
# Also setting particular index in conditional information to 1 for task ID.
self.conditional_information[-self.number_tasks+self.task_id_for_cond_info] = 1
def take_rollout_step(self, subpolicy_input, t, use_env=False):
# Feed subpolicy input into the policy.
actions = self.policy_network.get_actions(subpolicy_input,greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
if use_env==True:
# Take a step in the environment.
step_res = self.environment.step(action_to_execute.squeeze(0).detach().cpu().numpy())
# Get state.
observation = step_res[0]
# Now update conditional information...
# self.conditional_information = np.concatenate([new_state['robot-state'],new_state['object-state']])
gripper_open = np.array([0.0115, -0.0115])
gripper_closed = np.array([-0.020833, 0.020833])
# The state that we want is the joint state plus a normalized gripper value.
gripper_finger_values = step_res[0]['gripper_qpos']
gripper_values = (gripper_finger_values - gripper_open)/(gripper_closed - gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = 2*finger_diff-1
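# Sketch of the intent (assuming the hard-coded open/closed finger poses above match this gripper):
# each finger position is normalized relative to the open/closed poses, and the difference of the two
# normalized values is mapped through 2*diff - 1 to give a single scalar gripper value.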
# Concatenate joint and gripper state.
new_state_numpy = np.concatenate([observation['joint_pos'], np.array(gripper_value).reshape((1,))])
new_state = torch.tensor(new_state_numpy).to(device).float().view((1,-1))
# This should be true by default...
# if self.conditional_viz_env:
# self.set_env_conditional_info()
self.set_env_conditional_info()
else:
# Compute next state by adding action to state.
new_state = subpolicy_input[t,:self.state_dim]+action_to_execute
# return new_subpolicy_input
return action_to_execute, new_state
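# When use_env is False, the next state is obtained by simply integrating the action onto the previous state;
# when True, the robosuite environment is stepped and the joint+gripper state is read back, with the
# conditional information refreshed from the new observation.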
def create_RL_environment_for_rollout(self, environment_name, state=None, task_id=None):
self.environment = robosuite.make(environment_name)
self.task_id_for_cond_info = task_id
if state is not None:
self.environment.sim.set_state_from_flattened(state)
def rollout_variational_network(self, counter, i):
###########################################################
###########################################################
############# (0) #############
# Get sample we're going to train on. Single sample as of now.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if self.args.traj_length>0:
self.rollout_timesteps = self.args.traj_length
else:
self.rollout_timesteps = len(sample_traj)
############# (1) #############
# Sample latent variables from p(\zeta | \tau).
latent_z_indices, latent_b, variational_b_logprobabilities, variational_z_logprobabilities,\
variational_b_probabilities, variational_z_probabilities, kl_divergence, prior_loglikelihood = self.variational_policy.forward(torch.tensor(old_concatenated_traj).to(device).float(), self.epsilon)
############# (1.5) ###########
# It doesn't really matter what the conditional information is here, because the latent policy isn't being rolled out.
# We still call it because these assembled inputs are passed to the latent policy rollout later.
if self.conditional_viz_env:
self.set_env_conditional_info()
# Get assembled inputs and subpolicy inputs for variational rollout.
orig_assembled_inputs, orig_subpolicy_inputs, padded_action_seq = self.assemble_inputs(concatenated_traj, latent_z_indices, latent_b, sample_action_seq, self.conditional_information)
###########################################################
############# (A) VARIATIONAL POLICY ROLLOUT. #############
###########################################################
subpolicy_inputs = orig_subpolicy_inputs.clone().detach()
# For number of rollout timesteps:
for t in range(self.rollout_timesteps-1):
# Take a rollout step. Feed into policy, get action, step, return new input.
action_to_execute, new_state = self.take_rollout_step(subpolicy_inputs[:(t+1)].view((t+1,-1)), t)
state_action_tuple = torch.cat([new_state, action_to_execute],dim=1)
# Overwrite the subpolicy inputs with the new state action tuple.
subpolicy_inputs[t+1,:self.input_size] = state_action_tuple
# Get trajectory from this.
self.variational_trajectory_rollout = copy.deepcopy(subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy())
return orig_assembled_inputs, orig_subpolicy_inputs, latent_b
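# This variational rollout replays the z's inferred from the ground-truth trajectory through the subpolicy
# alone, so (as noted in rollout_visuals below) it tests the variational network and the subpolicies
# without involving the latent policy.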
def alternate_rollout_latent_policy(self, counter, i, orig_assembled_inputs, orig_subpolicy_inputs):
assembled_inputs = orig_assembled_inputs.clone().detach()
subpolicy_inputs = orig_subpolicy_inputs.clone().detach()
# This version of rollout uses the incremental reparam get actions function.
hidden = None
############# (0) #############
# Get sample we're going to train on. Single sample as of now.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
# Set rollout length.
if self.args.traj_length>0:
self.rollout_timesteps = self.args.traj_length
else:
self.rollout_timesteps = len(sample_traj)
# For appropriate number of timesteps.
for t in range(self.rollout_timesteps-1):
# First get input row for latent policy.
# Feed into latent policy and get z.
# Feed z and b into subpolicy.
pass
def rollout_latent_policy(self, orig_assembled_inputs, orig_subpolicy_inputs):
assembled_inputs = orig_assembled_inputs.clone().detach()
subpolicy_inputs = orig_subpolicy_inputs.clone().detach()
# Set the time since the previous b to 0.
delta_t = 0
# For number of rollout timesteps:
for t in range(self.rollout_timesteps-1):
##########################################
#### CODE FOR NEW Z SELECTION ROLLOUT ####
##########################################
# Pick latent_z and latent_b.
selected_b, new_selected_z = self.latent_policy.get_actions(assembled_inputs[:(t+1)].view((t+1,-1)), greedy=True, delta_t=delta_t)
if t==0:
selected_b = torch.ones_like(selected_b).to(device).float()
if selected_b[-1]==1:
# Copy over ALL z's. This is okay to do because we're greedily selecting, and the latent policy is hence deterministic.
selected_z = torch.tensor(new_selected_z).to(device).float()
# If b == 1, reset the time-since-last-b counter to 0.
delta_t = 0
else:
# Increment counter since last time b was 1.
delta_t += 1
# Set z's to 0.
assembled_inputs[t+1, self.input_size:self.input_size+self.number_policies] = 0.
# Set z and b in assembled input for the future latent policy passes.
if self.args.discrete_z:
assembled_inputs[t+1, self.input_size+selected_z[-1]] = 1.
else:
assembled_inputs[t+1, self.input_size:self.input_size+self.latent_z_dimensionality] = selected_z[-1]
# Note: this previously used the wrong dimension; the latent_b flag goes at index input_size+latent_z_dimensionality.
assembled_inputs[t+1, self.input_size+self.latent_z_dimensionality] = selected_b[-1]
# Before copying over, set conditional_info from the environment at the current timestep.
if self.conditional_viz_env:
self.set_env_conditional_info()
if self.conditional_info_size>0:
assembled_inputs[t+1, -self.conditional_info_size:] = torch.tensor(self.conditional_information).to(device).float()
# Set z's to 0.
subpolicy_inputs[t, self.input_size:self.input_size+self.number_policies] = 0.
# Set z and b in subpolicy input for the future subpolicy passes.
if self.args.discrete_z:
subpolicy_inputs[t, self.input_size+selected_z[-1]] = 1.
else:
subpolicy_inputs[t, self.input_size:] = selected_z[-1]
# Now pass subpolicy net forward and get action and next state.
action_to_execute, new_state = self.take_rollout_step(subpolicy_inputs[:(t+1)].view((t+1,-1)), t, use_env=self.conditional_viz_env)
state_action_tuple = torch.cat([new_state, action_to_execute],dim=1)
# Now update assembled input.
assembled_inputs[t+1, :self.input_size] = state_action_tuple
subpolicy_inputs[t+1, :self.input_size] = state_action_tuple
self.latent_trajectory_rollout = copy.deepcopy(subpolicy_inputs[:,:self.state_dim].detach().cpu().numpy())
concatenated_selected_b = np.concatenate([selected_b.detach().cpu().numpy(),np.zeros((1))],axis=-1)
if self.args.debug:
print("Embedding in Latent Policy Rollout.")
embed()
# Clear these variables from memory.
del subpolicy_inputs, assembled_inputs
return concatenated_selected_b
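# The returned b selections (padded with a trailing zero) are used by the caller as segmentation markers
# when visualizing the latent-policy rollout.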
def rollout_visuals(self, counter, i, get_image=True):
# if self.args.data=='Roboturk':
if self.conditional_viz_env:
self.create_RL_environment_for_rollout(self.dataset[i]['environment-name'], self.dataset[i]['flat-state'][0], self.dataset[i]['task-id'],)
# Rollout policy with
# a) Latent variable samples from variational policy operating on dataset trajectories - Tests variational network and subpolicies.
# b) Latent variable samples from latent policy in a rolling fashion, initialized with states from the trajectory - Tests latent and subpolicies.
# c) Latent variables from the ground truth set (only valid for the toy dataset) - Just tests subpolicies.
###########################################################
############# (A) VARIATIONAL POLICY ROLLOUT. #############
###########################################################
orig_assembled_inputs, orig_subpolicy_inputs, variational_segmentation = self.rollout_variational_network(counter, i)
###########################################################
################ (B) LATENT POLICY ROLLOUT. ###############
###########################################################
latent_segmentation = self.rollout_latent_policy(orig_assembled_inputs, orig_subpolicy_inputs)
if get_image==True:
latent_rollout_image = self.visualize_trajectory(self.latent_trajectory_rollout, segmentations=latent_segmentation, i=i, suffix='Latent')
variational_rollout_image = self.visualize_trajectory(self.variational_trajectory_rollout, segmentations=variational_segmentation.detach().cpu().numpy(), i=i, suffix='Variational')
return variational_rollout_image, latent_rollout_image
else:
return None, None
def run_iteration(self, counter, i):
# With learnt discrete subpolicy:
# For all epochs:
# # For all trajectories:
# # Sample z from variational network.
# # Evaluate likelihood of latent policy and subpolicy.
# # Update policies using likelihoods.
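# The variational network acts as a recognition model over (z, b) given the trajectory; the likelihoods of its samples under the latent policy and subpolicy drive the updates below.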
self.set_epoch(counter)
self.iter = counter
############# (0) #############
# Get sample we're going to train on. Single sample as of now.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if sample_traj is not None:
############# (1) #############
# Sample latent variables from p(\zeta | \tau).
latent_z_indices, latent_b, variational_b_logprobabilities, variational_z_logprobabilities,\
variational_b_probabilities, variational_z_probabilities, kl_divergence, prior_loglikelihood = self.variational_policy.forward(torch.tensor(old_concatenated_traj).to(device).float(), self.epsilon)
########## (2) & (3) ##########
# Evaluate Log Likelihoods of actions and options as "Return" for Variational policy.
subpolicy_loglikelihoods, subpolicy_loglikelihood, subpolicy_entropy,\
latent_loglikelihood, latent_b_logprobabilities, latent_z_logprobabilities,\
latent_b_probabilities, latent_z_probabilities, latent_z_logprobability, latent_b_logprobability, \
learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, temporal_loglikelihoods = self.evaluate_loglikelihoods(sample_traj, sample_action_seq, concatenated_traj, latent_z_indices, latent_b)
if self.args.train:
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in Train Function.")
embed()
############# (3) #############
# Update latent policy Pi_z with Reinforce like update using LL as return.
self.new_update_policies(i, sample_action_seq, subpolicy_loglikelihoods, subpolicy_entropy, latent_b, latent_z_indices,\
variational_z_logprobabilities, variational_b_logprobabilities, variational_z_probabilities, variational_b_probabilities, kl_divergence, \
latent_z_logprobabilities, latent_b_logprobabilities, latent_z_probabilities, latent_b_probabilities, \
learnt_subpolicy_loglikelihood, learnt_subpolicy_loglikelihoods, learnt_subpolicy_loglikelihood+latent_loglikelihood, \
prior_loglikelihood, latent_loglikelihood, temporal_loglikelihoods)
# Update Plots.
# self.update_plots(counter, sample_map, loglikelihood)
self.update_plots(counter, i, learnt_subpolicy_loglikelihood, latent_loglikelihood, subpolicy_entropy,
sample_traj, latent_z_logprobability, latent_b_logprobability, kl_divergence, prior_loglikelihood)
# print("Latent LogLikelihood: ", latent_loglikelihood)
# print("Subpolicy LogLikelihood: ", learnt_subpolicy_loglikelihood)
print("#########################################")
else:
if self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap':
pass
else:
print("#############################################")
print("Trajectory",i)
print("Predicted Z: \n", latent_z_indices.detach().cpu().numpy())
print("True Z : \n", np.array(self.dataset.Y_array[i][:self.args.traj_length]))
print("Latent B : \n", latent_b.detach().cpu().numpy())
# print("Variational Probs: \n", variational_z_probabilities.detach().cpu().numpy())
# print("Latent Probs : \n", latent_z_probabilities.detach().cpu().numpy())
print("Latent B Probs : \n", latent_b_probabilities.detach().cpu().numpy())
if self.args.subpolicy_model:
eval_encoded_logprobs = torch.zeros((latent_z_indices.shape[0]))
eval_orig_encoder_logprobs = torch.zeros((latent_z_indices.shape[0]))
torch_concat_traj = torch.tensor(concatenated_traj).to(device).float()
# For each timestep z in latent_z_indices, evaluate likelihood under pretrained encoder model.
for t in range(latent_z_indices.shape[0]):
eval_encoded_logprobs[t] = self.encoder_network.forward(torch_concat_traj, z_sample_to_evaluate=latent_z_indices[t])
_, eval_orig_encoder_logprobs[t], _, _ = self.encoder_network.forward(torch_concat_traj)
print("Encoder Loglikelihood:", eval_encoded_logprobs.detach().cpu().numpy())
print("Orig Encoder Loglikelihood:", eval_orig_encoder_logprobs.detach().cpu().numpy())
if self.args.debug:
embed()
def evaluate_metrics(self):
self.distances = -np.ones((self.test_set_size))
# Get test set elements as last (self.test_set_size) number of elements of dataset.
for i in range(self.test_set_size):
index = i + len(self.dataset)-self.test_set_size
print("Evaluating ", i, " in test set, or ", index, " in dataset.")
# Collect inputs.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
# If valid
if sample_traj is not None:
# Create environment to get conditional info.
if self.conditional_viz_env:
self.create_RL_environment_for_rollout(self.dataset[i]['environment-name'], self.dataset[i]['flat-state'][0])
# Rollout variational.
_, _, _ = self.rollout_variational_network(0, i)
self.distances[i] = ((sample_traj-self.variational_trajectory_rollout)**2).mean()
self.mean_distance = self.distances[self.distances>0].mean()
# Create save directory:
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
np.save(os.path.join(self.dir_name,"Trajectory_Distances_{0}.npy".format(self.args.name)),self.distances)
np.save(os.path.join(self.dir_name,"Mean_Trajectory_Distance_{0}.npy".format(self.args.name)),self.mean_distance)
def evaluate(self, model):
self.set_epoch(0)
if model:
self.load_all_models(model)
np.set_printoptions(suppress=True,precision=2)
print("Running Evaluation of State Distances on small test set.")
self.evaluate_metrics()
# Visualize space if the subpolicy has been trained...
if (self.args.data=='MIME' or self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk' or self.args.data=='Mocap') and (self.args.fix_subpolicy==0):
print("Running Visualization on Robot Data.")
self.pretrain_policy_manager = PolicyManager_Pretrain(self.args.number_policies, self.dataset, self.args)
self.pretrain_policy_manager.setup()
self.pretrain_policy_manager.load_all_models(model, only_policy=True)
self.pretrain_policy_manager.visualize_robot_data()
if self.args.subpolicy_model:
print("Loading encoder.")
self.setup_eval_against_encoder()
# Evaluate NLL and (potentially Expected Value Difference) on Validation / Test Datasets.
self.epsilon = 0.
# np.set_printoptions(suppress=True,precision=2)
# for i in range(60):
# self.run_iteration(0, i)
if self.args.debug:
embed()
class PolicyManager_BaselineRL(PolicyManager_BaseClass):
def __init__(self, number_policies=4, dataset=None, args=None):
# super(PolicyManager_BaselineRL, self).__init__(number_policies=number_policies, dataset=dataset, args=args)
super(PolicyManager_BaselineRL, self).__init__()
# Create environment, setup things, etc.
self.args = args
self.initial_epsilon = self.args.epsilon_from
self.final_epsilon = self.args.epsilon_to
self.decay_episodes = self.args.epsilon_over
self.baseline = None
self.learning_rate = self.args.learning_rate
self.max_timesteps = 100
self.gamma = 0.99
self.batch_size = 10
self.number_test_episodes = 100
# Per step decay.
self.decay_rate = (self.initial_epsilon-self.final_epsilon)/(self.decay_episodes)
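# Linear exploration schedule (applied in set_parameters below). Purely illustrative numbers:
# with epsilon_from=1.0, epsilon_to=0.1 and epsilon_over=100, decay_rate=0.009 and
# epsilon at episode e is max(1.0 - 0.009*e, 0.1).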
self.number_episodes = 5000000
# Ornstein-Uhlenbeck noise process parameters.
self.theta = 0.15
self.sigma = 0.2
self.gripper_open = np.array([0.0115, -0.0115])
self.gripper_closed = np.array([-0.020833, 0.020833])
self.reset_statistics()
def create_networks(self):
if self.args.MLP_policy:
self.policy_network = ContinuousMLP(self.input_size, self.args.hidden_size, self.output_size, self.args).to(device)
self.critic_network = CriticMLP(self.input_size, self.args.hidden_size, 1, self.args).to(device)
else:
# Create policy and critic.
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.args.hidden_size, self.output_size, self.args, self.args.number_layers, small_init=True).to(device)
self.critic_network = CriticNetwork(self.input_size, self.args.hidden_size, 1, self.args, self.args.number_layers).to(device)
def create_training_ops(self):
self.NLL_Loss = torch.nn.NLLLoss(reduction='none')
self.MSE_Loss = torch.nn.MSELoss(reduction='none')
# parameter_list = list(self.policy_network.parameters()) + list(self.critic_network.parameters())
self.policy_optimizer = torch.optim.Adam(self.policy_network.parameters(), lr=self.learning_rate)
self.critic_optimizer = torch.optim.Adam(self.critic_network.parameters(), lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Critic_Network'] = self.critic_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, critic=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
if critic:
self.critic_network.load_state_dict(load_object['Critic_Network'])
def setup(self):
# Calling a special RL setup function. This is because downstream classes inherit (and may override setup), but will still inherit RL_setup intact.
self.RL_setup()
def RL_setup(self):
# Create Mujoco environment.
self.environment = robosuite.make(self.args.environment, has_renderer=False, use_camera_obs=False, reward_shaping=self.args.shaped_reward)
# Get input and output sizes from these environments, etc.
self.obs = self.environment.reset()
self.output_size = self.environment.action_spec[0].shape[0]
self.state_size = self.obs['robot-state'].shape[0] + self.obs['object-state'].shape[0]
# self.input_size = self.state_size + self.output_size
self.input_size = self.state_size + self.output_size*2
# Create networks.
self.create_networks()
self.create_training_ops()
self.initialize_plots()
# Create Noise process.
self.NoiseProcess = RLUtils.OUNoise(self.output_size, min_sigma=self.args.OU_min_sigma, max_sigma=self.args.OU_max_sigma)
def set_parameters(self, episode_counter, evaluate=False):
if self.args.train and not(evaluate):
if episode_counter<self.decay_episodes:
self.epsilon = self.initial_epsilon-self.decay_rate*episode_counter
else:
self.epsilon = self.final_epsilon
else:
self.epsilon = 0.
def reset_lists(self):
self.reward_trajectory = []
self.state_trajectory = []
self.action_trajectory = []
self.image_trajectory = []
self.terminal_trajectory = []
self.cummulative_rewards = None
self.episode = None
def get_action(self, hidden=None, random=True, counter=0, evaluate=False):
# Change this to epsilon greedy...
whether_greedy = np.random.binomial(n=1,p=0.8)
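# With probability ~0.2 (or whenever random=True) a uniform random action in [-1, 1] is returned; otherwise the policy is queried below.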
if random or not(whether_greedy):
action = 2*np.random.random((self.output_size))-1
return action, hidden
# The rest of this will only be evaluated or run when random is false and when whether_greedy is true.
# Assemble states of current input row.
current_input_row = self.get_current_input_row()
# Using the incremental get actions. Still get action greedily, then add noise.
predicted_action, hidden = self.policy_network.incremental_reparam_get_actions(torch.tensor(current_input_row).to(device).float(), greedy=True, hidden=hidden)
if evaluate:
noise = torch.zeros_like(predicted_action).to(device).float()
else:
# Get noise from noise process.
noise = torch.randn_like(predicted_action).to(device).float()*self.epsilon
# Perturb action with noise.
perturbed_action = predicted_action + noise
if self.args.MLP_policy:
action = perturbed_action[-1].detach().cpu().numpy()
else:
action = perturbed_action[-1].squeeze(0).detach().cpu().numpy()
return action, hidden
def get_OU_action(self, hidden=None, random=False, counter=0, evaluate=False):
if random==True:
action = 2*np.random.random((self.output_size))-1
return action, hidden
# Assemble states of current input row.
current_input_row = self.get_current_input_row()
# Using the incremental get actions. Still get action greedily, then add noise.
predicted_action, hidden = self.policy_network.incremental_reparam_get_actions(torch.tensor(current_input_row).to(device).float(), greedy=True, hidden=hidden)
# Numpy action
if self.args.MLP_policy:
action = predicted_action[-1].detach().cpu().numpy()
else:
action = predicted_action[-1].squeeze(0).detach().cpu().numpy()
if evaluate:
perturbed_action = action
else:
# Perturb action with noise.
perturbed_action = self.NoiseProcess.get_action(action, counter)
return perturbed_action, hidden
def rollout(self, random=False, test=False, visualize=False):
counter = 0
eps_reward = 0.
state = self.environment.reset()
terminal = False
self.reset_lists()
# Reset the noise process at the start of every episode.
self.NoiseProcess.reset()
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
self.state_trajectory.append(state)
# self.terminal_trajectory.append(terminal)
# self.reward_trajectory.append(0.)
hidden = None
while not(terminal) and counter<self.max_timesteps:
if self.args.OU:
action, hidden = self.get_OU_action(hidden=hidden,random=random,counter=counter, evaluate=test)
else:
action, hidden = self.get_action(hidden=hidden,random=random,counter=counter, evaluate=test)
# Take a step in the environment.
next_state, onestep_reward, terminal, success = self.environment.step(action)
self.state_trajectory.append(next_state)
self.action_trajectory.append(action)
self.reward_trajectory.append(onestep_reward)
self.terminal_trajectory.append(terminal)
# Copy next state into state.
state = copy.deepcopy(next_state)
# Counter
counter += 1
# Append image.
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
print("Rolled out an episode for ",counter," timesteps.")
# Now that the episode is done, compute cumulative rewards...
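# cumsum over the reversed reward list gives the (undiscounted) reward-to-go R_t = sum_{t'>=t} r_{t'}; reversing back restores time order.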
self.cummulative_rewards = copy.deepcopy(np.cumsum(np.array(self.reward_trajectory)[::-1])[::-1])
self.episode_reward_statistics = copy.deepcopy(self.cummulative_rewards[0])
print("Achieved reward: ", self.episode_reward_statistics)
# print("########################################################")
# NOW construct an episode out of this..
self.episode = RLUtils.Episode(self.state_trajectory, self.action_trajectory, self.reward_trajectory, self.terminal_trajectory)
# Since we're doing TD updates, we DON'T want to use the cumulative reward, but rather the reward trajectory itself.
def get_transformed_gripper_value(self, gripper_finger_values):
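# Normalize each finger joint against its open/closed reference values, take the difference between the two normalized fingers, and rescale it to a single gripper scalar via 2*diff - 1.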
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
return gripper_value
def get_current_input_row(self):
# Adding joint states, gripper, and actions, in addition to the conditional info (rather than just conditional info and actions).
gripper_finger_values = self.state_trajectory[-1]['gripper_qpos']
conditional = np.concatenate([self.state_trajectory[-1]['robot-state'].reshape((1,-1)),self.state_trajectory[-1]['object-state'].reshape((1,-1))],axis=1)
if len(self.action_trajectory)>0:
state_action = np.concatenate([self.state_trajectory[-1]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[-1].reshape((1,-1))],axis=1)
else:
# state_action = np.concatenate([self.state_trajectory[-1]['robot-state'].reshape((1,-1)),self.state_trajectory[-1]['object-state'].reshape((1,-1)),np.zeros((1,self.output_size))],axis=1)
state_action = np.concatenate([self.state_trajectory[-1]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), np.zeros((1,self.output_size))],axis=1)
return np.concatenate([state_action, conditional],axis=1)
def assemble_inputs(self):
conditional_sequence = np.concatenate([np.concatenate([self.state_trajectory[t]['robot-state'].reshape((1,-1)),self.state_trajectory[t]['object-state'].reshape((1,-1))],axis=1) for t in range(len(self.state_trajectory))],axis=0)
state_action_sequence = np.concatenate([np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(self.state_trajectory[t]['gripper_qpos']), self.action_trajectory[t-1].reshape((1,-1))],axis=1) for t in range(1,len(self.state_trajectory))],axis=0)
initial_state_action = np.concatenate([self.state_trajectory[0]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(self.state_trajectory[0]['gripper_qpos']), np.zeros((1, self.output_size))],axis=1)
# Copy initial state to front of state_action seq.
state_action_sequence = np.concatenate([initial_state_action, state_action_sequence],axis=0)
inputs = np.concatenate([state_action_sequence, conditional_sequence],axis=1)
return inputs
def process_episode(self, episode):
# Assemble states, actions, targets.
# First reset all the lists from the rollout now that they've been written to memory.
self.reset_lists()
# Now set the lists.
self.state_trajectory = episode.state_list
self.action_trajectory = episode.action_list
self.reward_trajectory = episode.reward_list
self.terminal_trajectory = episode.terminal_list
assembled_inputs = self.assemble_inputs()
# Input to the policy should be states and actions.
self.state_action_inputs = torch.tensor(assembled_inputs).to(device).float()
# Get summed reward for statistics.
self.batch_reward_statistics += sum(self.reward_trajectory)
def set_differentiable_critic_inputs(self):
# Get the policy's greedy (reparameterized) predicted actions; the critic scores these for the actor loss.
predicted_action = self.policy_network.reparameterized_get_actions(self.state_action_inputs, greedy=True).squeeze(1)
# Exploration noise is computed here but is not added to the predicted actions used below.
noise = torch.randn_like(predicted_action).to(device).float()*self.epsilon
# Concatenate the states from policy inputs and the predicted actions.
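# The first output_size columns hold the proprioceptive state, the next output_size columns hold the executed action, and the rest is conditional info; here the executed actions are replaced by the policy's differentiable predictions.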
self.critic_inputs = torch.cat([self.state_action_inputs[:,:self.output_size], predicted_action, self.state_action_inputs[:,2*self.output_size:]],axis=1).to(device).float()
def update_policies(self):
######################################
# Compute losses for actor.
self.set_differentiable_critic_inputs()
self.policy_optimizer.zero_grad()
self.policy_loss = - self.critic_network.forward(self.critic_inputs[:-1]).mean()
self.policy_loss_statistics += self.policy_loss.clone().detach().cpu().numpy().mean()
self.policy_loss.backward()
self.policy_optimizer.step()
def set_targets(self):
if self.args.TD:
# Construct TD Targets.
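# Targets are the one-step rewards plus gamma times the critic's next-step predictions, scaled by the stored terminal flags.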
self.TD_targets = self.critic_predictions.clone().detach().cpu().numpy()
# Select till last time step, because we don't care what critic says after last timestep.
self.TD_targets = np.roll(self.TD_targets,-1,axis=0)[:-1]
# Mask with terminal.
self.TD_targets = self.gamma*np.array(self.terminal_trajectory)*self.TD_targets
self.TD_targets += np.array(self.reward_trajectory)
self.critic_targets = torch.tensor(self.TD_targets).to(device).float()
else:
self.cummulative_rewards = copy.deepcopy(np.cumsum(np.array(self.reward_trajectory)[::-1])[::-1])
self.critic_targets = torch.tensor(self.cummulative_rewards).to(device).float()
def update_critic(self):
######################################
# Zero gradients, then backprop into critic.
self.critic_optimizer.zero_grad()
# Get critic predictions first.
if self.args.MLP_policy:
self.critic_predictions = self.critic_network.forward(self.state_action_inputs).squeeze(1)
else:
self.critic_predictions = self.critic_network.forward(self.state_action_inputs).squeeze(1).squeeze(1)
# Before we actually compute loss, compute targets.
self.set_targets()
# We predicted critic values from states S_1 to S_{T+1} because we needed all for bootstrapping.
# For loss, we don't actually need S_{T+1}, so throw it out.
self.critic_loss = self.MSE_Loss(self.critic_predictions[:-1], self.critic_targets).mean()
self.critic_loss_statistics += self.critic_loss.clone().detach().cpu().numpy().mean()
self.critic_loss.backward()
self.critic_optimizer.step()
######################################
def update_networks(self):
# Update policy network.
self.update_policies()
# Now update critic network.
self.update_critic()
def reset_statistics(self):
# Can also reset the policy and critic loss statistics here.
self.policy_loss_statistics = 0.
self.critic_loss_statistics = 0.
self.batch_reward_statistics = 0.
self.episode_reward_statistics = 0.
def update_batch(self):
# Get set of indices of episodes in the memory.
batch_indices = self.memory.sample_batch(self.batch_size)
for ind in batch_indices:
# Retrieve appropriate episode from memory.
episode = self.memory.memory[ind]
# Set quantities from episode.
self.process_episode(episode)
# Now compute gradients to both networks from batch.
self.update_networks()
def update_plots(self, counter):
self.tf_logger.scalar_summary('Total Episode Reward', copy.deepcopy(self.episode_reward_statistics), counter)
self.tf_logger.scalar_summary('Batch Rewards', self.batch_reward_statistics/self.batch_size, counter)
self.tf_logger.scalar_summary('Policy Loss', self.policy_loss_statistics/self.batch_size, counter)
self.tf_logger.scalar_summary('Critic Loss', self.critic_loss_statistics/self.batch_size, counter)
if counter%self.args.display_freq==0:
# print("Embedding in Update Plots.")
# Rollout policy.
self.rollout(random=False, test=True, visualize=True)
self.tf_logger.gif_summary("Rollout Trajectory", [np.array(self.image_trajectory)], counter)
# Now that we've updated these into TB, reset stats.
self.reset_statistics()
def run_iteration(self, counter, evaluate=False):
# This is really a run episode function. Ignore the index, just use the counter.
# 1) Rollout trajectory.
# 2) Collect stats / append to memory and stuff.
# 3) Update policies.
self.set_parameters(counter, evaluate=evaluate)
# Maintain counter to keep track of updating the policy regularly.
# cProfile.runctx('self.rollout()',globals(), locals(),sort='cumtime')
self.rollout(random=False, test=evaluate)
if self.args.train and not(evaluate):
# If training, append to memory.
self.memory.append_to_memory(self.episode)
# Update on batch.
self.update_batch()
# Update plots.
self.update_plots(counter)
def initialize_memory(self):
# Create memory object.
self.memory = RLUtils.ReplayMemory(memory_size=self.args.memory_size)
# Number of initial episodes needs to be less than memory size.
self.initial_episodes = self.args.burn_in_eps
# While the number of episodes is less than initial_episodes.
episode_counter = 0
while episode_counter<self.initial_episodes:
# Reset the noise process at the start of every episode.
self.NoiseProcess.reset()
print("Initializing Memory Episode: ", episode_counter)
# Rollout an episode.
self.rollout(random=self.args.random_memory_burn_in)
# Add episode to memory.
self.memory.append_to_memory(self.episode)
episode_counter += 1
def evaluate(self, epoch=None, model=None):
if model is not None:
print("Loading model in training.")
self.load_all_models(model)
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
else:
model_epoch = epoch
self.total_rewards = np.zeros((self.number_test_episodes))
# For number of test episodes.
for eps in range(self.number_test_episodes):
# Run an iteration (and rollout)...
self.run_iteration(eps, evaluate=True)
self.total_rewards[eps] = np.array(self.reward_trajectory).sum()
# Create save directory to save these results.
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
np.save(os.path.join(self.dir_name,"Total_Rewards_{0}.npy".format(self.args.name)),self.total_rewards)
np.save(os.path.join(self.dir_name,"Mean_Reward_{0}.npy".format(self.args.name)),self.total_rewards.mean())
def train(self, model=None):
# 1) Initialize memory maybe.
# 2) For number of iterations, RUN ITERATION:
# 3) Rollout trajectory.
# 4) Collect stats.
# 5) Update policies.
if model:
print("Loading model in training.")
self.load_all_models(model)
print("Starting Main Training Procedure.")
self.set_parameters(0)
np.set_printoptions(suppress=True,precision=2)
# Fixing seeds.
np.random.seed(seed=0)
torch.manual_seed(0)
print("Initializing Memory.")
self.initialize_memory()
for e in range(self.number_episodes):
# Reset the noise process at the start of every episode.
self.NoiseProcess.reset()
if e%self.args.save_freq==0:
self.save_all_models("epoch{0}".format(e))
self.run_iteration(e)
print("#############################")
print("Running Episode: ",e)
if e%self.args.eval_freq==0:
self.evaluate(epoch=e, model=None)
class PolicyManager_DownstreamRL(PolicyManager_BaselineRL):
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_DownstreamRL, self).__init__(number_policies=4, dataset=dataset, args=args)
def setup(self):
# Create Mujoco environment.
self.environment = robosuite.make(self.args.environment, has_renderer=False, use_camera_obs=False, reward_shaping=self.args.shaped_reward)
# Get input and output sizes from these environments, etc.
self.obs = self.environment.reset()
self.output_size = self.environment.action_spec[0].shape[0]
self.state_size = self.environment.action_spec[0].shape[0]
self.conditional_info_size = self.obs['robot-state'].shape[0] + self.obs['object-state'].shape[0]
# If we are loading policies....
if self.args.model:
# Padded conditional info.
self.conditional_info_size = 53
self.input_size = 2*self.state_size
# Create networks.
self.create_networks()
self.create_training_ops()
self.initialize_plots()
self.gripper_open = np.array([0.0115, -0.0115])
self.gripper_closed = np.array([-0.020833, 0.020833])
# Create Noise process.
self.NoiseProcess = RLUtils.OUNoise(self.output_size)
def create_networks(self):
# Copying over the create networks from Joint Policy training.
# Not sure if there's a better way to inherit - unless we inherit from both classes.
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.args.hidden_size, self.output_size, self.args, self.args.number_layers).to(device)
self.critic_network = CriticNetwork(self.input_size+self.conditional_info_size, self.args.hidden_size, 1, self.args, self.args.number_layers).to(device)
if self.args.constrained_b_prior:
self.latent_policy = ContinuousLatentPolicyNetwork_ConstrainedBPrior(self.input_size+self.conditional_info_size, self.args.hidden_size, self.args, self.args.number_layers).to(device)
else:
self.latent_policy = ContinuousLatentPolicyNetwork(self.input_size+self.conditional_info_size, self.args.hidden_size, self.args, self.args.number_layers).to(device)
def create_training_ops(self):
self.NLL_Loss = torch.nn.NLLLoss(reduction='none')
self.MSE_Loss = torch.nn.MSELoss(reduction='none')
# If we are using reparameterization, use a global optimizer for both policies, and a global loss function.
parameter_list = list(self.latent_policy.parameters())
if not(self.args.fix_subpolicy):
parameter_list = parameter_list + list(self.policy_network.parameters())
# The policy optimizer handles both the low and high level policies, as long as the z's being passed from the latent to sub policy are differentiable.
self.policy_optimizer = torch.optim.Adam(parameter_list, lr=self.learning_rate)
self.critic_optimizer = torch.optim.Adam(self.critic_network.parameters(), lr=self.learning_rate)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
save_object['Latent_Policy'] = self.latent_policy.state_dict()
save_object['Critic_Network'] = self.critic_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, critic=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
if self.args.load_latent:
self.latent_policy.load_state_dict(load_object['Latent_Policy'])
if critic:
self.critic_network.load_state_dict(load_object['Critic_Network'])
def reset_lists(self):
self.reward_trajectory = []
self.state_trajectory = []
self.action_trajectory = []
self.image_trajectory = []
self.terminal_trajectory = []
self.latent_z_trajectory = []
self.latent_b_trajectory = []
self.cummulative_rewards = None
self.episode = None
def get_conditional_information_row(self, t=-1):
# Get robot and object state.
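# The raw robot-state + object-state vector is written into a zero-padded row of size conditional_info_size, which may be larger than the raw observation when loading pretrained models.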
conditional_info_row = np.zeros((1,self.conditional_info_size))
info_value = np.concatenate([self.state_trajectory[t]['robot-state'].reshape((1,-1)),self.state_trajectory[t]['object-state'].reshape((1,-1))],axis=1)
conditional_info_row[0,:info_value.shape[1]] = info_value
return conditional_info_row
def get_transformed_gripper_value(self, gripper_finger_values):
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
return gripper_value
def get_current_input_row(self, t=-1):
# The state that we want is ... joint state?
gripper_finger_values = self.state_trajectory[t]['gripper_qpos']
if len(self.action_trajectory)==0 or t==0:
return np.concatenate([self.state_trajectory[0]['joint_pos'].reshape((1,-1)), np.zeros((1,1)), np.zeros((1,self.output_size))],axis=1)
elif t==-1:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t].reshape((1,-1))],axis=1)
else:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t-1].reshape((1,-1))],axis=1)
def get_latent_input_row(self, t=-1):
# If first timestep, z's are 0 and b is 1.
if len(self.latent_z_trajectory)==0 or t==0:
return np.concatenate([np.zeros((1, self.args.z_dimensions)),np.ones((1,1))],axis=1)
if t==-1:
return np.concatenate([self.latent_z_trajectory[t].reshape((1,-1)),self.latent_b_trajectory[t].reshape((1,1))],axis=1)
elif t>0:
t-=1
return np.concatenate([self.latent_z_trajectory[t].reshape((1,-1)),self.latent_b_trajectory[t].reshape((1,1))],axis=1)
def assemble_latent_input_row(self, t=-1):
# Function to assemble ONE ROW of latent policy input.
# Remember, the latent policy takes.. JOINT_states, actions, z's, b's, and then conditional information of robot-state and object-state.
# Assemble these three pieces:
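# Resulting row layout: [joint state, gripper scalar, previous action | latent z, latent b | zero-padded robot-state + object-state].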
return np.concatenate([self.get_current_input_row(t), self.get_latent_input_row(t), self.get_conditional_information_row(t)],axis=1)
def assemble_latent_inputs(self):
# Assemble latent policy inputs over time.
return np.concatenate([self.assemble_latent_input_row(t) for t in range(len(self.state_trajectory))],axis=0)
def assemble_subpolicy_input_row(self, latent_z=None, t=-1):
# Remember, the subpolicy takes.. JOINT_states, actions, z's.
# Assemble (remember, without b, and without conditional info).
if latent_z is not None:
# return np.concatenate([self.get_current_input_row(t), latent_z.reshape((1,-1))],axis=1)
# Instead of numpy, use torch.
return torch.cat([torch.tensor(self.get_current_input_row(t)).to(device).float(), latent_z.reshape((1,-1))],dim=1)
else:
# Remember, get_latent_input_row isn't operating on something that needs to be differentiable, so just use numpy and then wrap with torch tensor.
# return torch.tensor(np.concatenate([self.get_current_input_row(t), self.get_latent_input_row(t)[:,:-1]],axis=1)).to(device).float()
return torch.tensor(np.concatenate([self.get_current_input_row(t), self.latent_z_trajectory[t].reshape((1,-1))],axis=1)).to(device).float()
def assemble_subpolicy_inputs(self, latent_z_list=None):
# Assemble sub policy inputs over time.
if latent_z_list is None:
# return np.concatenate([self.assemble_subpolicy_input_row(t) for t in range(len(self.state_trajectory))],axis=0)
# Instead of numpy, use torch...
return torch.cat([self.assemble_subpolicy_input_row(t=t) for t in range(len(self.state_trajectory))],dim=0)
else:
# return np.concatenate([self.assemble_subpolicy_input_row(t, latent_z=latent_z_list[t]) for t in range(len(self.state_trajectory))],axis=0)
# Instead of numpy, use torch...
return torch.cat([self.assemble_subpolicy_input_row(t=t, latent_z=latent_z_list[t]) for t in range(len(self.state_trajectory))],dim=0)
def assemble_state_action_row(self, action=None, t=-1):
# Get state action input row for critic.
if action is not None:
gripper_finger_values = self.state_trajectory[t]['gripper_qpos']
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
# Don't create a torch tensor out of actions.
return torch.cat([torch.tensor(self.state_trajectory[t]['joint_pos']).to(device).float().reshape((1,-1)), torch.tensor(gripper_value).to(device).float(), action.reshape((1,-1)), torch.tensor(self.get_conditional_information_row(t)).to(device).float()],dim=1)
else:
# Just use actions that were used in the trajectory. This doesn't need to be differentiable, because it's going to be used for the critic targets, so just make a torch tensor from numpy.
return torch.tensor(np.concatenate([self.get_current_input_row(t), self.get_conditional_information_row(t)],axis=1)).to(device).float()
def assemble_state_action_inputs(self, action_list=None):
# return np.concatenate([self.assemble_state_action_row(t) for t in range(len(self.state_trajectory))],axis=0)
# Instead of numpy use torch.
if action_list is not None:
return torch.cat([self.assemble_state_action_row(t=t, action=action_list[t]) for t in range(len(self.state_trajectory))],dim=0)
else:
return torch.cat([self.assemble_state_action_row(t=t) for t in range(len(self.state_trajectory))],dim=0)
def get_OU_action_latents(self, policy_hidden=None, latent_hidden=None, random=False, counter=0, previous_z=None, test=False, delta_t=0):
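# One hierarchical action-selection step: the latent policy proposes (z, b) from the assembled history row, z is perturbed with epsilon-scaled Gaussian noise, the subpolicy maps (state, z) to a primitive action, and that action is perturbed with OU (or Gaussian) noise unless testing.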
# if random==True:
# action = 2*np.random.random((self.output_size))-1
# return action,
# Get latent policy inputs.
latent_policy_inputs = self.assemble_latent_input_row()
# Feed in latent policy inputs and get the latent policy outputs (z, b, and hidden)
latent_z, latent_b, latent_hidden = self.latent_policy.incremental_reparam_get_actions(torch.tensor(latent_policy_inputs).to(device).float(), greedy=True, hidden=latent_hidden, previous_z=previous_z, delta_t=delta_t)
# Perturb latent_z with some noise.
z_noise = self.epsilon*torch.randn_like(latent_z)
# Add noise to z.
latent_z = latent_z + z_noise
if latent_b[-1]==1:
delta_t = 0
else:
delta_t += 1
# Now get subpolicy inputs.
# subpolicy_inputs = self.assemble_subpolicy_input_row(latent_z.detach().cpu().numpy())
subpolicy_inputs = self.assemble_subpolicy_input_row(latent_z=latent_z)
# Feed in subpolicy inputs and get the subpolicy outputs (a, hidden)
predicted_action, hidden = self.policy_network.incremental_reparam_get_actions(torch.tensor(subpolicy_inputs).to(device).float(), greedy=True, hidden=policy_hidden)
# Numpy action
action = predicted_action[-1].squeeze(0).detach().cpu().numpy()
if test:
perturbed_action = action
else:
# Perturb action with noise.
if self.args.OU:
perturbed_action = self.NoiseProcess.get_action(action, counter)
else:
# Just regular epsilon
perturbed_action = action + self.epsilon*np.random.randn(action.shape[-1])
return perturbed_action, latent_z, latent_b, policy_hidden, latent_hidden, delta_t
def rollout(self, random=False, test=False, visualize=False):
# Reset the noise process at the start of every rollout.
self.NoiseProcess.reset()
# Reset some data for the rollout.
counter = 0
eps_reward = 0.
terminal = False
self.reset_lists()
# Reset environment and add state to the list.
state = self.environment.reset()
self.state_trajectory.append(state)
# If we are going to visualize, get an initial image.
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
# Instead of maintaining just one LSTM hidden state... now have one for each policy level.
policy_hidden = None
latent_hidden = None
latent_z = None
delta_t = 0
# For number of steps / while we don't terminate:
while not(terminal) and counter<self.max_timesteps:
# Get the action to execute, b, z, and hidden states.
action, latent_z, latent_b, policy_hidden, latent_hidden, delta_t = self.get_OU_action_latents(policy_hidden=policy_hidden, latent_hidden=latent_hidden, random=random, counter=counter, previous_z=latent_z, test=test, delta_t=delta_t)
if self.args.debug:
print("Embed in Trajectory Rollout.")
embed()
# Take a step in the environment.
next_state, onestep_reward, terminal, success = self.environment.step(action)
# Append everything to lists.
self.state_trajectory.append(next_state)
self.action_trajectory.append(action)
self.reward_trajectory.append(onestep_reward)
self.terminal_trajectory.append(terminal)
self.latent_z_trajectory.append(latent_z.detach().cpu().numpy())
self.latent_b_trajectory.append(latent_b.detach().cpu().numpy())
# Copy next state into state.
state = copy.deepcopy(next_state)
# Counter
counter += 1
# Append image to image list if we are visualizing.
if visualize:
image = self.environment.sim.render(600,600, camera_name='frontview')
self.image_trajectory.append(np.flipud(image))
# Now that the episode is done, compute cumulative rewards...
self.cummulative_rewards = copy.deepcopy(np.cumsum(np.array(self.reward_trajectory)[::-1])[::-1])
self.episode_reward_statistics = copy.deepcopy(self.cummulative_rewards[0])
print("Rolled out an episode for ",counter," timesteps.")
print("Achieved reward: ", self.episode_reward_statistics)
# NOW construct an episode out of this..
self.episode = RLUtils.HierarchicalEpisode(self.state_trajectory, self.action_trajectory, self.reward_trajectory, self.terminal_trajectory, self.latent_z_trajectory, self.latent_b_trajectory)
def process_episode(self, episode):
# Assemble states, actions, targets.
# First reset all the lists from the rollout now that they've been written to memory.
self.reset_lists()
# Now set the lists.
self.state_trajectory = episode.state_list
self.action_trajectory = episode.action_list
self.reward_trajectory = episode.reward_list
self.terminal_trajectory = episode.terminal_list
self.latent_z_trajectory = episode.latent_z_list
self.latent_b_trajectory = episode.latent_b_list
# Get summed reward for statistics.
self.batch_reward_statistics += sum(self.reward_trajectory)
# Assembling state_action inputs to feed to the Critic network for TARGETS. (These don't need to, and in fact shouldn't, be differentiable).
self.state_action_inputs = torch.tensor(self.assemble_state_action_inputs()).to(device).float()
def update_policies(self):
# There are a few steps that need to be taken.
# 1) Assemble latent policy inputs.
# 2) Get differentiable latent z's from latent policy.
# 3) Assemble subpolicy inputs with these differentiable latent z's.
# 4) Get differentiable actions from subpolicy.
# 5) Assemble critic inputs with these differentiable actions.
# 6) Now compute critic predictions that are differentiable w.r.t. sub and latent policies.
# 7) Backprop.
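# Because reparameterized (greedy) samples are used at every stage, gradients flow from the critic through the actions and latent z's back into both the subpolicy and the latent policy with a single backward call.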
# 1) Assemble latent policy inputs. # Remember, these are the only things that don't need to be differentiable.
self.latent_policy_inputs = torch.tensor(self.assemble_latent_inputs()).to(device).float()
# 2) Feed this into latent policy.
latent_z, latent_b, _ = self.latent_policy.incremental_reparam_get_actions(torch.tensor(self.latent_policy_inputs).to(device).float(), greedy=True)
# 3) Assemble subpolicy inputs with diff latent z's. Remember, this needs to be differentiable. Modify the assembling to torch, WITHOUT creating new torch tensors of z.
self.subpolicy_inputs = self.assemble_subpolicy_inputs(latent_z_list=latent_z)
# 4) Feed into subpolicy.
diff_actions, _ = self.policy_network.incremental_reparam_get_actions(self.subpolicy_inputs, greedy=True)
# 5) Now assemble critic inputs.
self.differentiable_critic_inputs = self.assemble_state_action_inputs(action_list=diff_actions)
# 6) Compute critic predictions.
self.policy_loss = - self.critic_network.forward(self.differentiable_critic_inputs[:-1]).mean()
# Also log statistics.
self.policy_loss_statistics += self.policy_loss.clone().detach().cpu().numpy().mean()
# 7) Now backprop into policy.
self.policy_optimizer.zero_grad()
self.policy_loss.backward()
self.policy_optimizer.step()
class PolicyManager_DMPBaselines(PolicyManager_Joint):
# Make it inherit joint policy manager init.
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_DMPBaselines, self).__init__(number_policies, dataset, args)
def setup_DMP_parameters(self):
# Default DMP parameters; the command-line arguments below override them.
self.number_kernels = 15
self.window = 15
self.kernel_bandwidth = 1.5
self.number_kernels = self.args.baseline_kernels
self.window = self.args.baseline_window
self.kernel_bandwidth = self.args.baseline_kernel_bandwidth
def get_MSE(self, sample_traj, trajectory_rollout):
# Evaluate MSE between reconstruction and sample trajectory.
return ((sample_traj-trajectory_rollout)**2).mean()
def get_FlatDMP_rollout(self, sample_traj, velocities=None):
# Reinitialize DMP Class.
self.dmp = DMP.DMP(time_steps=len(sample_traj), num_ker=self.number_kernels, dimensions=self.state_size, kernel_bandwidth=self.kernel_bandwidth, alphaz=5., time_basis=True)
# Learn DMP for particular trajectory.
self.dmp.learn_DMP(sample_traj)
# Get rollout.
if velocities is not None:
trajectory_rollout = self.dmp.rollout(sample_traj[0],sample_traj[-1],velocities)
else:
trajectory_rollout = self.dmp.rollout(sample_traj[0],sample_traj[-1],np.zeros((self.state_size)))
return trajectory_rollout
def evaluate_FlatDMPBaseline_iteration(self, index, sample_traj):
trajectory_rollout = self.get_FlatDMP_rollout(sample_traj)
self.FlatDMP_distances[index] = self.get_MSE(sample_traj, trajectory_rollout)
def get_AccelerationChangepoint_rollout(self, sample_traj):
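# Baseline: segment the trajectory at peaks of the acceleration magnitude, fit a separate DMP to each segment (seeded with the segment's initial velocity), and stitch the per-segment rollouts back together.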
# Get magnitudes of acceleration across time.
acceleration_norm = np.linalg.norm(np.diff(sample_traj,n=2,axis=0),axis=1)
# Get velocities.
velocities = np.diff(sample_traj,n=1,axis=0,prepend=sample_traj[0].reshape((1,-1)))
# Find acceleration peaks that are at least `window` timesteps apart.
window = self.window
segmentation = find_peaks(acceleration_norm, distance=window)[0]
if len(segmentation)==0:
segmentation = np.array([0,len(sample_traj)])
else:
# Add start and end to peaks.
if segmentation[0]<window:
segmentation[0] = 0
else:
segmentation = np.insert(segmentation, 0, 0)
# If end segmentation is within WINDOW of end, change segment to end.
if (len(sample_traj) - segmentation[-1])<window:
segmentation[-1] = len(sample_traj)
else:
segmentation = np.insert(segmentation, len(segmentation), sample_traj.shape[0])
trajectory_rollout = np.zeros_like(sample_traj)
# For every segment.
for i in range(len(segmentation)-1):
# Get trajectory segment.
trajectory_segment = sample_traj[segmentation[i]:segmentation[i+1]]
# Get rollout. # Feed velocities into rollout. # First velocity is 0.
segment_rollout = self.get_FlatDMP_rollout(trajectory_segment, velocities[segmentation[i]])
# Copy segment rollout into full rollout.
trajectory_rollout[segmentation[i]:segmentation[i+1]] = segment_rollout
return trajectory_rollout
def evaluate_AccelerationChangepoint_iteration(self, index, sample_traj):
trajectory_rollout = self.get_AccelerationChangepoint_rollout(sample_traj)
self.AccChangepointDMP_distances[index] = self.get_MSE(sample_traj, trajectory_rollout)
def evaluate_MeanRegression_iteration(self, index, sample_traj):
mean = sample_traj.mean(axis=0)
self.MeanRegression_distances[index] = ((sample_traj-mean)**2).mean()
def get_GreedyDMP_rollout(self, sample_traj):
pass
def evaluate_across_testset(self):
self.setup_DMP_parameters()
# Create array for distances.
self.FlatDMP_distances = -np.ones((self.test_set_size))
self.AccChangepointDMP_distances = -np.ones((self.test_set_size))
self.MeanRegression_distances = -np.ones((self.test_set_size))
self.lengths = -np.ones((self.test_set_size))
for i in range(self.test_set_size):
# Set actual index.
index = i + len(self.dataset) - self.test_set_size
if i%100==0:
print("Evaluating Datapoint ", i)
# Get trajectory.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if sample_traj is not None:
# Set sample trajectory to ignore gripper.
if self.args.data=='MIME':
sample_traj = sample_traj[:,:-2]
self.state_size = 14
elif self.args.data=='Roboturk' or self.args.data=='OrigRoboturk' or self.args.data=='FullRoboturk':
sample_traj = sample_traj[:,:-1]
self.state_size = 7
# sample_traj = gaussian_filter1d(sample_traj,3.5,axis=0,mode='nearest')
# elif self.args.data=='Mocap':
# sample_traj = sample_traj
self.lengths[i] = len(sample_traj)
# Eval Flat DMP.
self.evaluate_FlatDMPBaseline_iteration(i, sample_traj)
# Eval AccChange DMP Baseline.
self.evaluate_AccelerationChangepoint_iteration(i, sample_traj)
# Evaluate Mean Regression Baseline.
self.evaluate_MeanRegression_iteration(i, sample_traj)
# self.mean_distance = self.distances[self.distances>0].mean()
print("Average Distance of Flat DMP Baseline: ", self.FlatDMP_distances[self.FlatDMP_distances>0].mean())
print("Average Distance of Acceleration Changepoint Baseline: ", self.AccChangepointDMP_distances[self.AccChangepointDMP_distances>0].mean())
print("Average Distance of Mean Regression Baseline: ", self.MeanRegression_distances[self.MeanRegression_distances>0].mean())
embed()
class PolicyManager_Imitation(PolicyManager_Pretrain, PolicyManager_BaselineRL):
def __init__(self, number_policies=4, dataset=None, args=None):
super(PolicyManager_Imitation, self).__init__(number_policies=number_policies, dataset=dataset, args=args)
# Explicitly run inits to make sure inheritance is good.
# PolicyManager_Pretrain.__init__(self, number_policies, dataset, args)
# PolicyManager_BaselineRL.__init__(self, args)
# Set train only policy to true.
self.args.train_only_policy = 1
# Get task index from task name.
self.demo_task_index = np.where(np.array(self.dataset.environment_names)==self.args.environment)[0][0]
def setup(self):
# Fixing seeds.
np.random.seed(seed=0)
torch.manual_seed(0)
np.set_printoptions(suppress=True,precision=2)
# Create index list.
extent = self.dataset.get_number_task_demos(self.demo_task_index)
self.index_list = np.arange(0,extent)
# Create Mujoco environment.
self.environment = robosuite.make(self.args.environment, has_renderer=False, use_camera_obs=False, reward_shaping=self.args.shaped_reward)
self.gripper_open = np.array([0.0115, -0.0115])
self.gripper_closed = np.array([-0.020833, 0.020833])
# Get input and output sizes from these environments, etc.
self.obs = self.environment.reset()
self.output_size = self.environment.action_spec[0].shape[0]
self.state_size = self.obs['robot-state'].shape[0] + self.obs['object-state'].shape[0]
self.conditional_info_size = self.state_size
# Input size.. state, action, conditional
self.input_size = self.state_size + self.output_size*2
# Create networks.
self.create_networks()
self.create_training_ops()
self.initialize_plots()
self.total_rewards = 0.
# Create Noise process.
self.NoiseProcess = RLUtils.OUNoise(self.output_size)
def create_networks(self):
# We don't need a decoder.
# Policy Network is the only thing we need.
self.policy_network = ContinuousPolicyNetwork(self.input_size, self.hidden_size, self.output_size, self.args, self.number_layers).to(device)
def save_all_models(self, suffix):
logdir = os.path.join(self.args.logdir, self.args.name)
savedir = os.path.join(logdir,"saved_models")
if not(os.path.isdir(savedir)):
os.mkdir(savedir)
save_object = {}
save_object['Policy_Network'] = self.policy_network.state_dict()
torch.save(save_object,os.path.join(savedir,"Model_"+suffix))
def load_all_models(self, path, only_policy=False):
load_object = torch.load(path)
self.policy_network.load_state_dict(load_object['Policy_Network'])
def update_policies(self, logprobabilities):
# Set gradients to 0.
self.optimizer.zero_grad()
# Set policy loss.
self.policy_loss = -logprobabilities[:-1].mean()
# Backward.
self.policy_loss.backward()
# Take a step.
self.optimizer.step()
def update_plots(self, counter, logprobabilities):
self.tf_logger.scalar_summary('Policy LogLikelihood', torch.mean(logprobabilities), counter)
if counter%self.args.display_freq==0:
# print("Embedding in Update Plots.")
# Rollout policy.
self.rollout(random=False, test=True, visualize=True)
self.tf_logger.gif_summary("Rollout Trajectory", [np.array(self.image_trajectory)], counter)
def run_iteration(self, counter, i):
self.set_epoch(counter)
self.iter = counter
############# (0) #############
# Get sample we're going to train on.
sample_traj, sample_action_seq, concatenated_traj, old_concatenated_traj = self.collect_inputs(i)
if sample_traj is not None:
# Now concatenate info with... conditional_information
policy_inputs = np.concatenate([concatenated_traj, self.conditional_information], axis=1)
# Add zeros to the last action, so that we evaluate likelihood correctly. Since we're using demo actions, no need.
# padded_action_seq = np.concatenate([sample_action_seq, np.zeros((1,self.output_size))],axis=0)
# Feed concatenated trajectory into the policy.
logprobabilities, _ = self.policy_network.forward(torch.tensor(policy_inputs).to(device).float(), sample_action_seq)
if self.args.train:
if self.args.debug:
if self.iter%self.args.debug==0:
print("Embedding in Train Function.")
embed()
# Update policy.
self.update_policies(logprobabilities)
# Update plots.
self.update_plots(counter, logprobabilities)
def get_transformed_gripper_value(self, gripper_finger_values):
gripper_values = (gripper_finger_values - self.gripper_open)/(self.gripper_closed - self.gripper_open)
finger_diff = gripper_values[1]-gripper_values[0]
gripper_value = np.array(2*finger_diff-1).reshape((1,-1))
return gripper_value
def get_state_action_row(self, t=-1):
# The state that we want is ... joint state?
gripper_finger_values = self.state_trajectory[t]['gripper_qpos']
if len(self.action_trajectory)==0 or t==0:
return np.concatenate([self.state_trajectory[0]['joint_pos'].reshape((1,-1)), np.zeros((1,1)), np.zeros((1,self.output_size))],axis=1)
elif t==-1:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t].reshape((1,-1))],axis=1)
else:
return np.concatenate([self.state_trajectory[t]['joint_pos'].reshape((1,-1)), self.get_transformed_gripper_value(gripper_finger_values), self.action_trajectory[t-1].reshape((1,-1))],axis=1)
def get_current_input_row(self, t=-1):
# Rewrite this function so that the BaselineRL rollout class can still use it here...
# First get conditional information.
# Get robot and object state.
conditional_info = np.concatenate([self.state_trajectory[t]['robot-state'].reshape((1,-1)),self.state_trajectory[t]['object-state'].reshape((1,-1))],axis=1)
# Get state actions..
state_action = self.get_state_action_row()
# Concatenate.
input_row = np.concatenate([state_action, conditional_info],axis=1)
return input_row
def evaluate(self, epoch=None, model=None):
if model is not None:
self.load_all_models(model)
model_epoch = int(os.path.split(self.args.model)[1].lstrip("Model_epoch"))
else:
model_epoch = epoch
self.total_rewards = np.zeros((self.number_test_episodes))
# Set parameters like epsilon.
self.set_parameters(0, evaluate=True)
# For number of test episodes.
for eps in range(self.number_test_episodes):
# Now run a rollout.
self.rollout(random=False, test=True)
self.total_rewards[eps] = np.array(self.reward_trajectory).sum()
# Create save directory to save these results.
upper_dir_name = os.path.join(self.args.logdir,self.args.name,"MEval")
if not(os.path.isdir(upper_dir_name)):
os.mkdir(upper_dir_name)
self.dir_name = os.path.join(self.args.logdir,self.args.name,"MEval","m{0}".format(model_epoch))
if not(os.path.isdir(self.dir_name)):
os.mkdir(self.dir_name)
np.save(os.path.join(self.dir_name,"Total_Rewards_{0}.npy".format(self.args.name)),self.total_rewards)
np.save(os.path.join(self.dir_name,"Mean_Reward_{0}.npy".format(self.args.name)),self.total_rewards.mean())
# Add average reward to tensorboard.
self.tf_logger.scalar_summary('Average Reward', self.total_rewards.mean(), model_epoch)
def train(self, model=None):
if model:
print("Loading model in training.")
self.load_all_models(model)
counter = 0
# For number of training epochs.
for e in range(self.number_epochs):
print("Starting Epoch: ",e)
if e%self.args.save_freq==0:
self.save_all_models("epoch{0}".format(e))
# self.automatic_evaluation(e)
np.random.shuffle(self.index_list)
if self.args.debug:
print("Embedding in Outer Train Function.")
embed()
# For every item in the epoch:
if self.args.setting=='imitation':
extent = self.dataset.get_number_task_demos(self.demo_task_index)
else:
extent = len(self.dataset)-self.test_set_size
for i in range(extent):
print("Epoch: ",e," Trajectory:",i, "Datapoint: ", self.index_list[i])
self.run_iteration(counter, self.index_list[i])
counter = counter+1
if e%self.args.eval_freq==0:
self.evaluate(e)
self.write_and_close()
class PolicyManager_Transfer(PolicyManager_BaseClass):
def __init__(self, args=None, source_dataset=None, target_dataset=None):
super(PolicyManager_Transfer, self).__init__()
# The inherited functions refer to self.args. Also making this to make inheritance go smooth.
self.args = args
# Before instantiating policy managers of source or target domains; create copies of args with data attribute changed.
self.source_args = copy.deepcopy(args)
self.source_args.data = self.source_args.source_domain
self.source_dataset = source_dataset
self.target_args = copy.deepcopy(args)
self.target_args.data = self.target_args.target_domain
self.target_dataset = target_dataset
# Now create two instances of policy managers for each domain. Call them source and target domain policy managers.
self.source_manager = PolicyManager_Pretrain(dataset=self.source_dataset, args=self.source_args)
self.target_manager = PolicyManager_Pretrain(dataset=self.target_dataset, args=self.target_args)
self.source_dataset_size = len(self.source_manager.dataset) - self.source_manager.test_set_size
self.target_dataset_size = len(self.target_manager.dataset) - self.target_manager.test_set_size
# Now create variables that we need.
self.number_epochs = 200
self.extent = max(self.source_dataset_size, self.target_dataset_size)
# Now setup networks for these PolicyManagers.
self.source_manager.setup()
self.target_manager.setup()
# Now define other parameters that will be required for the discriminator, etc.
self.input_size = self.args.z_dimensions
self.hidden_size = self.args.hidden_size
self.output_size = 2
self.learning_rate = self.args.learning_rate
def set_iteration(self, counter):
# Based on what phase of training we are in, set discriminability loss weight, etc.
# Phase 1 of training: Don't train discriminator at all, set discriminability loss weight to 0.
if counter<self.args.training_phase_size:
self.discriminability_loss_weight = 0.
self.vae_loss_weight = 1.
self.training_phase = 1
self.skip_vae = False
self.skip_discriminator = True
# Phase 2 of training: Train the discriminator, and set discriminability loss weight to original.
else:
self.discriminability_loss_weight = self.args.discriminability_weight
self.vae_loss_weight = self.args.vae_loss_weight
# Now make discriminator and vae train in alternating fashion.
# Set number of iterations of alteration.
# self.alternating_phase_size = self.args.alternating_phase_size*self.extent
# # If odd epoch, train discriminator. (Just so that we start training discriminator first).
# if (counter/self.alternating_phase_size)%2==1:
# self.skip_discriminator = False
# self.skip_vae = True
# # Otherwise train VAE.
# else:
# self.skip_discriminator = True
# self.skip_vae = False
# Train discriminator for k times as many steps as VAE. Set args.alternating_phase_size as 1 for this.
if (counter//self.args.alternating_phase_size)%(self.args.discriminator_phase_size+1)>=1:
print("Training Discriminator.")
self.skip_discriminator = False
self.skip_vae = True
# Otherwise train VAE.
else:
print("Training VAE.")
self.skip_discriminator = True
self.skip_vae = False
self.training_phase = 2
self.source_manager.set_epoch(counter)
self.target_manager.set_epoch(counter)
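# Minimal sketch of the schedule implemented above (illustrative, assuming alternating_phase_size=1
# and discriminator_phase_size=k): out of every k+1 consecutive iterations in phase 2,
# k train the discriminator and 1 trains the VAE.
# 	k = 2
# 	for counter in range(6):
# 		train_discriminator = (counter // 1) % (k + 1) >= 1
# 		# counter 0..5  ->  VAE, Disc, Disc, VAE, Disc, Disc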
def create_networks(self):
# Call create networks from each of the policy managers.
self.source_manager.create_networks()
self.target_manager.create_networks()
# Now must also create discriminator.
self.discriminator_network = DiscreteMLP(self.input_size, self.hidden_size, self.output_size).to(device)
def create_training_ops(self):
# # Call create training ops from each of the policy managers. Need these optimizers, because the encoder-decoders get a different loss than the discriminator.
self.source_manager.create_training_ops()
self.target_manager.create_training_ops()
# Create BCE loss object.
# self.BCE_loss = torch.nn.BCELoss(reduction='None')
self.negative_log_likelihood_loss_function = torch.nn.NLLLoss(reduction='none')
# Create common optimizer for source, target, and discriminator networks.
self.discriminator_optimizer = torch.optim.Adam(self.discriminator_network.parameters(),lr=self.learning_rate)
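# For reference, torch.nn.NLLLoss(reduction='none') expects log-probabilities and integer class
# targets; a minimal, self-contained example of the call pattern used in update_networks below:
# 	logprobs = torch.log_softmax(torch.randn(1, 2), dim=1)		# (batch=1, classes=2)
# 	nll = torch.nn.NLLLoss(reduction='none')
# 	per_sample_loss = nll(logprobs, torch.tensor([1]))			# shape (1,)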
def save_all_models(self, suffix):
self.logdir = os.path.join(self.args.logdir, self.args.name)
self.savedir = os.path.join(self.logdir,"saved_models")
if not(os.path.isdir(self.savedir)):
os.mkdir(self.savedir)
self.save_object = {}
# Source
self.save_object['Source_Policy_Network'] = self.source_manager.policy_network.state_dict()
self.save_object['Source_Encoder_Network'] = self.source_manager.encoder_network.state_dict()
# Target
self.save_object['Target_Policy_Network'] = self.target_manager.policy_network.state_dict()
self.save_object['Target_Encoder_Network'] = self.target_manager.encoder_network.state_dict()
# Discriminator
self.save_object['Discriminator_Network'] = self.discriminator_network.state_dict()
torch.save(self.save_object,os.path.join(self.savedir,"Model_"+suffix))
def load_all_models(self, path):
self.load_object = torch.load(path)
# Source
self.source_manager.policy_network.load_state_dict(self.load_object['Source_Policy_Network'])
self.source_manager.encoder_network.load_state_dict(self.load_object['Source_Encoder_Network'])
# Target
self.target_manager.policy_network.load_state_dict(self.load_object['Target_Policy_Network'])
self.target_manager.encoder_network.load_state_dict(self.load_object['Target_Encoder_Network'])
# Discriminator
self.discriminator_network.load_state_dict(self.load_object['Discriminator_Network'])
def get_domain_manager(self, domain):
# Create a list, and just index into this list.
domain_manager_list = [self.source_manager, self.target_manager]
return domain_manager_list[domain]
def get_trajectory_segment_tuple(self, source_manager, target_manager):
# Sample indices.
source_index = np.random.randint(0, high=self.source_dataset_size)
target_index = np.random.randint(0, high=self.target_dataset_size)
# Get trajectory segments.
source_trajectory_segment, source_action_seq, _ = source_manager.get_trajectory_segment(source_manager.index_list[source_index])
target_trajectory_segment, target_action_seq, _ = target_manager.get_trajectory_segment(target_manager.index_list[target_index])
return source_trajectory_segment, source_action_seq, target_trajectory_segment, target_action_seq
def encode_decode_trajectory(self, policy_manager, i, return_trajectory=False, trajectory_input=None):
# This should basically replicate the encode-decode steps in run_iteration of the Pretrain_PolicyManager.
############# (0) #############
# Sample trajectory segment from dataset.
# Check if the index is too big. If yes, just sample randomly.
if i >= len(policy_manager.dataset):
i = np.random.randint(0, len(policy_manager.dataset))
if trajectory_input is not None:
# Grab trajectory segment from tuple.
torch_traj_seg = trajectory_input['target_trajectory_rollout']
else:
trajectory_segment, sample_action_seq, sample_traj = policy_manager.get_trajectory_segment(i)
# Torchify trajectory segment.
torch_traj_seg = torch.tensor(trajectory_segment).to(device).float()
if trajectory_input is not None or trajectory_segment is not None:
############# (1) #############
# Encode trajectory segment into latent z.
latent_z, encoder_loglikelihood, encoder_entropy, kl_divergence = policy_manager.encoder_network.forward(torch_traj_seg, policy_manager.epsilon)
########## (2) & (3) ##########
# Feed latent z and trajectory segment into policy network and evaluate likelihood.
latent_z_seq, latent_b = policy_manager.construct_dummy_latents(latent_z)
# If we are using the pre-computed trajectory input (i.e. in the second encode_decode call, from target trajectory to target latent z),
# don't assemble the trajectory in numpy; just clone the previous subpolicy_inputs and replace the latent z's in it.
if trajectory_input is not None:
# Assign trajectory_input['target_subpolicy_inputs'].clone() to subpolicy_inputs, and then replace the latent z's.
subpolicy_inputs = trajectory_input['target_subpolicy_inputs'].clone()
subpolicy_inputs[:,2*self.state_dim:-1] = latent_z_seq
# Now get "sample_action_seq" for forward function.
sample_action_seq = subpolicy_inputs[:,self.state_dim:2*self.state_dim].clone()
else:
_, subpolicy_inputs, sample_action_seq = policy_manager.assemble_inputs(trajectory_segment, latent_z_seq, latent_b, sample_action_seq)
# Policy net doesn't use the decayed epsilon. (Because we never sample from it in training, only in rollouts.)
loglikelihoods, _ = policy_manager.policy_network.forward(subpolicy_inputs, sample_action_seq)
loglikelihood = loglikelihoods[:-1].mean()
if return_trajectory:
return sample_traj, latent_z
else:
return subpolicy_inputs, latent_z, loglikelihood, kl_divergence
if return_trajectory:
return None, None
else:
return None, None, None, None
def update_plots(self, counter, viz_dict):
# VAE Losses.
self.tf_logger.scalar_summary('Policy LogLikelihood', self.likelihood_loss, counter)
self.tf_logger.scalar_summary('Discriminability Loss', self.discriminability_loss, counter)
self.tf_logger.scalar_summary('Encoder KL', self.encoder_KL, counter)
self.tf_logger.scalar_summary('VAE Loss', self.VAE_loss, counter)
self.tf_logger.scalar_summary('Total VAE Loss', self.total_VAE_loss, counter)
self.tf_logger.scalar_summary('Domain', viz_dict['domain'], counter)
# Plot discriminator values after we've started training it.
if self.training_phase>1:
# Discriminator Loss.
self.tf_logger.scalar_summary('Discriminator Loss', self.discriminator_loss, counter)
# Compute discriminator prob of right action for logging.
self.tf_logger.scalar_summary('Discriminator Probability', viz_dict['discriminator_probs'], counter)
# If we are displaying things:
if counter%self.args.display_freq==0:
self.gt_gif_list = []
self.rollout_gif_list = []
# Now using both TSNE and PCA.
# Plot source, target, and shared embeddings via TSNE.
tsne_source_embedding, tsne_target_embedding, tsne_combined_embeddings, tsne_combined_traj_embeddings = self.get_embeddings(projection='tsne')
# Now actually plot the images.
self.tf_logger.image_summary("TSNE Source Embedding", [tsne_source_embedding], counter)
self.tf_logger.image_summary("TSNE Target Embedding", [tsne_target_embedding], counter)
self.tf_logger.image_summary("TSNE Combined Embeddings", [tsne_combined_embeddings], counter)
# Plot source, target, and shared embeddings via PCA.
pca_source_embedding, pca_target_embedding, pca_combined_embeddings, pca_combined_traj_embeddings = self.get_embeddings(projection='pca')
# Now actually plot the images.
self.tf_logger.image_summary("PCA Source Embedding", [pca_source_embedding], counter)
self.tf_logger.image_summary("PCA Target Embedding", [pca_target_embedding], counter)
self.tf_logger.image_summary("PCA Combined Embeddings", [pca_combined_embeddings], counter)
if self.args.source_domain=='ContinuousNonZero' and self.args.target_domain=='ContinuousNonZero':
self.tf_logger.image_summary("PCA Combined Trajectory Embeddings", [pca_combined_traj_embeddings], counter)
self.tf_logger.image_summary("TSNE Combined Trajectory Embeddings", [tsne_combined_traj_embeddings], counter)
# We are also going to log Ground Truth trajectories and their reconstructions in each of the domains, to make sure our networks are learning.
# Should be able to use the policy manager's functions to do this.
source_trajectory, source_reconstruction, target_trajectory, target_reconstruction = self.get_trajectory_visuals()
if source_trajectory is not None:
# Now actually plot the images.
if self.args.source_domain=='ContinuousNonZero':
self.tf_logger.image_summary("Source Trajectory", [source_trajectory], counter)
self.tf_logger.image_summary("Source Reconstruction", [source_reconstruction], counter)
else:
self.tf_logger.gif_summary("Source Trajectory", [source_trajectory], counter)
self.tf_logger.gif_summary("Source Reconstruction", [source_reconstruction], counter)
if self.args.target_domain=='ContinuousNonZero':
self.tf_logger.image_summary("Target Trajectory", [target_trajectory], counter)
self.tf_logger.image_summary("Target Reconstruction", [target_reconstruction], counter)
else:
self.tf_logger.gif_summary("Target Trajectory", [target_trajectory], counter)
self.tf_logger.gif_summary("Target Reconstruction", [target_reconstruction], counter)
if self.args.source_domain=='ContinuousNonZero' and self.args.target_domain=='ContinuousNonZero':
# Evaluate metrics and plot them.
# self.evaluate_correspondence_metrics(computed_sets=False)
# Actually, we've probably computed trajectory and latent sets.
self.evaluate_correspondence_metrics()
self.tf_logger.scalar_summary('Source To Target Trajectory Distance', self.source_target_trajectory_distance, counter)
self.tf_logger.scalar_summary('Target To Source Trajectory Distance', self.target_source_trajectory_distance, counter)
def get_transform(self, latent_z_set, projection='tsne', shared=False):
if shared:
# If this set of z's contains z's from both source and target domains, mean-std normalize them independently.
normed_z = np.zeros_like(latent_z_set)
# Normalize source.
source_mean = latent_z_set[:self.N].mean(axis=0)
source_std = latent_z_set[:self.N].std(axis=0)
normed_z[:self.N] = (latent_z_set[:self.N]-source_mean)/source_std
# Normalize target.
target_mean = latent_z_set[self.N:].mean(axis=0)
target_std = latent_z_set[self.N:].std(axis=0)
normed_z[self.N:] = (latent_z_set[self.N:]-target_mean)/target_std
else:
# Just normalize z's.
mean = latent_z_set.mean(axis=0)
std = latent_z_set.std(axis=0)
normed_z = (latent_z_set-mean)/std
if projection=='tsne':
# Use TSNE to project the data:
tsne = skl_manifold.TSNE(n_components=2,random_state=0)
embedded_zs = tsne.fit_transform(normed_z)
scale_factor = 1
scaled_embedded_zs = scale_factor*embedded_zs
return scaled_embedded_zs, tsne
elif projection=='pca':
# Use PCA to project the data:
pca_object = PCA(n_components=2)
embedded_zs = pca_object.fit_transform(normed_z)
return embedded_zs, pca_object
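# Note on the projections above: sklearn's TSNE only provides fit_transform (no transform()
# for unseen points), whereas a fitted PCA object can be reused. Illustrative pattern:
# 	pca = PCA(n_components=2)
# 	source_2d = pca.fit_transform(source_zs)	# fit on source z's
# 	target_2d = pca.transform(target_zs)		# reuse the same projection for target z's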
def transform_zs(self, latent_z_set, transforming_object):
# Simply just transform according to a fit transforming_object.
return transforming_object.transform(latent_z_set)
# @profile
def get_embeddings(self, projection='tsne'):
# Function to visualize source, target, and combined embeddings:
self.N = 100
self.source_latent_zs = np.zeros((self.N,self.args.z_dimensions))
self.target_latent_zs = np.zeros((self.N,self.args.z_dimensions))
self.shared_latent_zs = np.zeros((2*self.N,self.args.z_dimensions))
# For N data points:
for i in range(self.N):
# Get corresponding latent z's of source and target domains.
_, source_z, _, _ = self.encode_decode_trajectory(self.source_manager, i)
_, target_z, _, _ = self.encode_decode_trajectory(self.target_manager, i)
if source_z is not None:
self.source_latent_zs[i] = source_z.detach().cpu().numpy()
self.shared_latent_zs[i] = source_z.detach().cpu().numpy()
if target_z is not None:
self.target_latent_zs[i] = target_z.detach().cpu().numpy()
self.shared_latent_zs[self.N+i] = target_z.detach().cpu().numpy()
if projection=='tsne':
# Use TSNE to transform data.
source_embedded_zs, _ = self.get_transform(self.source_latent_zs, projection)
target_embedded_zs, _ = self.get_transform(self.target_latent_zs, projection)
shared_embedded_zs, _ = self.get_transform(self.shared_latent_zs, projection, shared=True)
elif projection=='pca':
# Now fit PCA to source.
source_embedded_zs, pca = self.get_transform(self.source_latent_zs, projection)
target_embedded_zs = self.transform_zs(self.target_latent_zs, pca)
shared_embedded_zs = np.concatenate([source_embedded_zs, target_embedded_zs],axis=0)
source_image = self.plot_embedding(source_embedded_zs, "Source_Embedding")
target_image = self.plot_embedding(target_embedded_zs, "Target_Embedding")
shared_image = self.plot_embedding(shared_embedded_zs, "Shared_Embedding", shared=True)
toy_shared_embedding_image = None
if self.args.source_domain=='ContinuousNonZero' and self.args.target_domain=='ContinuousNonZero':
toy_shared_embedding_image = self.plot_embedding(shared_embedded_zs, "Toy_Shared_Traj_Embedding", shared=True, trajectory=True)
return source_image, target_image, shared_image, toy_shared_embedding_image
# @profile
def plot_embedding(self, embedded_zs, title, shared=False, trajectory=False):
fig = plt.figure()
ax = fig.gca()
if shared:
colors = 0.2*np.ones((2*self.N))
colors[self.N:] = 0.8
else:
colors = 0.2*np.ones((self.N))
if trajectory:
# Create a scatter plot of the embedding.
self.source_manager.get_trajectory_and_latent_sets()
self.target_manager.get_trajectory_and_latent_sets()
ratio = 0.4
color_scaling = 15
# Assemble shared trajectory set.
traj_length = len(self.source_manager.trajectory_set[0,:,0])
self.shared_trajectory_set = np.zeros((2*self.N, traj_length, 2))
self.shared_trajectory_set[:self.N] = self.source_manager.trajectory_set
self.shared_trajectory_set[self.N:] = self.target_manager.trajectory_set
color_range_min = 0.2*color_scaling
color_range_max = 0.8*color_scaling+traj_length-1
for i in range(2*self.N):
ax.scatter(embedded_zs[i,0]+ratio*self.shared_trajectory_set[i,:,0],embedded_zs[i,1]+ratio*self.shared_trajectory_set[i,:,1],c=colors[i]*color_scaling+range(traj_length),cmap='jet',vmin=color_range_min,vmax=color_range_max)
else:
# Create a scatter plot of the embedding.
ax.scatter(embedded_zs[:,0],embedded_zs[:,1],c=colors,vmin=0,vmax=1,cmap='jet')
# Title.
ax.set_title("{0}".format(title),fontdict={'fontsize':40})
fig.canvas.draw()
# Grab image.
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
image = np.transpose(image, axes=[2,0,1])
return image
def get_trajectory_visuals(self):
i = np.random.randint(0,high=self.extent)
# First get a trajectory, starting point, and latent z.
source_trajectory, source_latent_z = self.encode_decode_trajectory(self.source_manager, i, return_trajectory=True)
if source_trajectory is not None:
# Reconstruct using the source domain manager.
_, source_trajectory_image, source_reconstruction_image = self.source_manager.get_robot_visuals(0, source_latent_z, source_trajectory, return_image=True)
# Now repeat the same for target domain - First get a trajectory, starting point, and latent z.
target_trajectory, target_latent_z = self.encode_decode_trajectory(self.target_manager, i, return_trajectory=True)
# Reconstruct using the target domain manager.
_, target_trajectory_image, target_reconstruction_image = self.target_manager.get_robot_visuals(0, target_latent_z, target_trajectory, return_image=True)
return np.array(source_trajectory_image), np.array(source_reconstruction_image), np.array(target_trajectory_image), np.array(target_reconstruction_image)
else:
return None, None, None, None
def update_networks(self, domain, policy_manager, policy_loglikelihood, encoder_KL, discriminator_loglikelihood, latent_z):
#######################
# Update VAE portion.
#######################
# Zero out gradients of encoder and decoder (policy).
policy_manager.optimizer.zero_grad()
# Compute VAE loss on the current domain as likelihood plus weighted KL.
self.likelihood_loss = -policy_loglikelihood.mean()
self.encoder_KL = encoder_KL.mean()
self.VAE_loss = self.likelihood_loss + self.args.kl_weight*self.encoder_KL
# Compute discriminability loss for encoder (implicitly ignores decoder).
# Pretend the label was the opposite of what it is, and train the encoder to make the discriminator believe this flipped label.
# I.e. train encoder to make discriminator maximize likelihood of wrong label.
self.discriminability_loss = self.negative_log_likelihood_loss_function(discriminator_loglikelihood.squeeze(1), torch.tensor(1-domain).to(device).long().view(1,))
# Total encoder loss:
self.total_VAE_loss = self.vae_loss_weight*self.VAE_loss + self.discriminability_loss_weight*self.discriminability_loss
if not(self.skip_vae):
# Go backward through the generator (encoder / decoder), and take a step.
self.total_VAE_loss.backward()
policy_manager.optimizer.step()
#######################
# Update Discriminator.
#######################
# Zero gradients of discriminator.
self.discriminator_optimizer.zero_grad()
# If we tried to zero grad the discriminator and then use NLL loss on it again, Pytorch would cry about going backward through a part of the graph that we already
# went backward through. Instead, just pass things through the discriminator again, but this time detaching latent_z.
discriminator_logprob, discriminator_prob = self.discriminator_network(latent_z.detach())
# Compute discriminator loss for discriminator.
self.discriminator_loss = self.negative_log_likelihood_loss_function(discriminator_logprob.squeeze(1), torch.tensor(domain).to(device).long().view(1,))
if not(self.skip_discriminator):
# Now go backward and take a step.
self.discriminator_loss.backward()
self.discriminator_optimizer.step()
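# The two passes through the discriminator above follow the usual adversarial pattern; an
# illustrative reduction of the idea (names here are placeholders, not part of this class):
# 	logprob_for_encoder, _ = discriminator(z)			# gradients reach the encoder
# 	logprob_for_disc, _ = discriminator(z.detach())		# gradients stop at the discriminator
# The encoder is trained against the flipped label, the discriminator against the true label.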
# @profile
def run_iteration(self, counter, i):
# Phases:
# Phase 1: Train encoder-decoder for both domains initially, so that discriminator is not fed garbage.
# Phase 2: Train encoder, decoder for each domain, and discriminator concurrently.
# Algorithm:
# For every epoch:
# # For every datapoint:
# # 1) Select which domain to use (source or target, i.e. with 50% chance, select either domain).
# # 2) Get trajectory segments from desired domain.
# # 3) Encode trajectory segments into latent z's and compute likelihood of trajectory actions under the decoder.
# # 4) Feed into discriminator, get likelihood of each domain.
# # 5) Compute and apply gradient updates.
# Remember to make domain agnostic function calls to encode, feed into discriminator, get likelihoods, etc.
# (0) Setup things like training phases, epsilon values, etc.
self.set_iteration(counter)
# (1) Select which domain to run on. This is supervision of discriminator.
domain = np.random.binomial(1,0.5)
# (1.5) Get domain policy manager.
policy_manager = self.get_domain_manager(domain)
# (2) & (3) Get trajectory segment and encode and decode.
subpolicy_inputs, latent_z, loglikelihood, kl_divergence = self.encode_decode_trajectory(policy_manager, i)
if latent_z is not None:
# (4) Feed latent z's to discriminator, and get discriminator likelihoods.
discriminator_logprob, discriminator_prob = self.discriminator_network(latent_z)
# (5) Compute and apply gradient updates.
self.update_networks(domain, policy_manager, loglikelihood, kl_divergence, discriminator_logprob, latent_z)
# Now update Plots.
viz_dict = {'domain': domain, 'discriminator_probs': discriminator_prob.squeeze(0).squeeze(0)[domain].detach().cpu().numpy()}
self.update_plots(counter, viz_dict)
# Run memory profiling.
# @profile
def set_neighbor_objects(self, computed_sets=False):
if not(computed_sets):
self.source_manager.get_trajectory_and_latent_sets()
self.target_manager.get_trajectory_and_latent_sets()
# Compute nearest neighbors for each set. First build KD-Trees / Ball-Trees.
self.source_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.source_manager.latent_z_set)
self.target_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.target_manager.latent_z_set)
self.neighbor_obj_set = True
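# Illustrative sklearn NearestNeighbors call pattern matching the queries made below
# (array names are placeholders; shapes assume N source and M target latent z's of dimension d):
# 	nn_obj = NearestNeighbors(n_neighbors=1).fit(source_z_set)		# (N, d)
# 	distances, indices = nn_obj.kneighbors(target_z_set)			# both of shape (M, 1)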
def evaluate_correspondence_metrics(self, computed_sets=True):
print("Evaluating correspondence metrics.")
# Evaluate the correspondence and alignment metrics.
# Whether latent_z_sets and trajectory_sets are already computed for each manager.
self.set_neighbor_objects(computed_sets)
# if not(computed_sets):
# self.source_manager.get_trajectory_and_latent_sets()
# self.target_manager.get_trajectory_and_latent_sets()
# # Compute nearest neighbors for each set. First build KD-Trees / Ball-Trees.
# self.source_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.source_manager.latent_z_set)
# self.target_neighbors_object = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(self.target_manager.latent_z_set)
# Compute neighbors.
_, source_target_neighbors = self.source_neighbors_object.kneighbors(self.target_manager.latent_z_set)
_, target_source_neighbors = self.target_neighbors_object.kneighbors(self.source_manager.latent_z_set)
# # Now compute trajectory distances for neighbors.
# source_target_trajectory_diffs = (self.source_manager.trajectory_set - self.target_manager.trajectory_set[source_target_neighbors.squeeze(1)])
# self.source_target_trajectory_distance = copy.deepcopy(np.linalg.norm(source_target_trajectory_diffs,axis=(1,2)).mean())
# target_source_trajectory_diffs = (self.target_manager.trajectory_set - self.source_manager.trajectory_set[target_source_neighbors.squeeze(1)])
# self.target_source_trajectory_distance = copy.deepcopy(np.linalg.norm(target_source_trajectory_diffs,axis=(1,2)).mean())
# Remember, absolute trajectory differences is meaningless, since the data is randomly initialized across the state space.
# Instead, compare actions. I.e. first compute differences along the time dimension.
source_traj_actions = np.diff(self.source_manager.trajectory_set,axis=1)
target_traj_actions = np.diff(self.target_manager.trajectory_set,axis=1)
source_target_trajectory_diffs = (source_traj_actions - target_traj_actions[source_target_neighbors.squeeze(1)])
self.source_target_trajectory_distance = copy.deepcopy(np.linalg.norm(source_target_trajectory_diffs,axis=(1,2)).mean())
target_source_trajectory_diffs = (target_traj_actions - source_traj_actions[target_source_neighbors.squeeze(1)])
self.target_source_trajectory_distance = copy.deepcopy(np.linalg.norm(target_source_trajectory_diffs,axis=(1,2)).mean())
# Reset variables to prevent memory leaks.
# source_neighbors_object = None
# target_neighbors_object = None
del self.source_neighbors_object
del self.target_neighbors_object
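# Shape sketch of the action-space metric above (illustrative): for trajectory sets of shape
# (N, T, 2), np.diff(..., axis=1) gives per-step actions of shape (N, T-1, 2), and
# np.linalg.norm(..., axis=(1,2)) reduces each matched pair of action sequences to one scalar.
# 	actions = np.diff(np.zeros((10, 25, 2)), axis=1)	# -> shape (10, 24, 2)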
def evaluate(self, model=None):
# Evaluating Transfer - we just want embeddings of both source and target; so run evaluate of both source and target policy managers.
# Instead of parsing and passing model to individual source and target policy managers, just load using the transfer policy manager, and then run eval.
if model is not None:
self.load_all_models(model)
# Run source policy manager evaluate.
self.source_manager.evaluate(suffix="Source")
# Run target policy manager evaluate.
self.target_manager.evaluate(suffix="Target")
# Evaluate metrics.
self.evaluate_correspondence_metrics()
def automatic_evaluation(self, e):
pass
# Writing a cycle consistency transfer PM class.
class PolicyManager_CycleConsistencyTransfer(PolicyManager_Transfer):
# Inherit from transfer.
def __init__(self, args=None, source_dataset=None, target_dataset=None):
super(PolicyManager_CycleConsistencyTransfer, self).__init__(args, source_dataset, target_dataset)
self.neighbor_obj_set = False
# Don't actually need to define these functions since they perform same steps as super functions.
# def create_networks(self):
# super().create_networks()
# # Must also create two discriminator networks; one for source --> target --> source, one for target --> source --> target.
# # Remember, since these discriminator networks are operating on the trajectory space, we have to
# # make them LSTM networks, rather than MLPs.
# # # We have the encoder network class that's perfect for this. Output size is 2.
# # self.source_discriminator = EncoderNetwork(self.source_manager.input_size, self.hidden_size, self.output_size).to(device)
# # self.target_discriminator = EncoderNetwork(self.source_manager.input_size, self.hidden_size, self.output_size).to(device)
def create_training_ops(self):
# Call super training ops.
super().create_training_ops()
# # Now create discriminator optimizers.
# self.source_discriminator_optimizer = torch.optim.Adam(self.source_discriminator_network.parameters(),lr=self.learning_rate)
# self.target_discriminator_optimizer = torch.optim.Adam(self.target_discriminator_network.parameters(),lr=self.learning_rate)
# Instead of using the individual policy managers' optimizers, use one single optimizer.
self.parameter_list = self.source_manager.parameter_list + self.target_manager.parameter_list
self.optimizer = torch.optim.Adam(self.parameter_list, lr=self.learning_rate)
# def save_all_models(self, suffix):
# # Call super save model.
# super().save_all_models(suffix)
# # Now save the individual source / target discriminators.
# self.save_object['Source_Discriminator_Network'] = self.source_discriminator_network.state_dict()
# self.save_object['Target_Discriminator_Network'] = self.target_discriminator_network.state_dict()
# # Overwrite the save from super.
# torch.save(self.save_object,os.path.join(self.savedir,"Model_"+suffix))
# def load_all_models(self, path):
# # Call super load.
# super().load_all_models(path)
# # Now load the individual source and target discriminators.
# self.source_discriminator.load_state_dict(self.load_object['Source_Discriminator_Network'])
# self.target_discriminator.load_state_dict(self.load_object['Target_Discriminator_Network'])
# A bunch of functions should just be directly usable:
# get_domain_manager, get_trajectory_segment_tuple, encode_decode_trajectory, update_plots, get_transform,
# transform_zs, get_embeddings, plot_embeddings, get_trajectory_visuals, evaluate_correspondence_metrics,
# evaluate, automatic_evaluation
def get_start_state(self, domain, source_latent_z):
# Function to retrieve the start state for differentiable decoding from target domain.
# How we do this is first to retrieve the target domain latent z closest to the source_latent_z.
# We then select the trajectory corresponding to this target_domain latent_z.
# We then copy the start state of this trajectory.
if not(self.neighbor_obj_set):
self.set_neighbor_objects()
# First get neighbor object and trajectory sets.
neighbor_object_list = [self.source_neighbors_object, self.target_neighbors_object]
trajectory_set_list = [self.source_manager.trajectory_set, self.target_manager.trajectory_set]
# Remember, we need _target_ domain. So use 1-domain instead of domain.
neighbor_object = neighbor_object_list[1-domain]
trajectory_set = trajectory_set_list[1-domain]
# Next get closest target z.
_ , target_latent_z_index = neighbor_object.kneighbors(source_latent_z.squeeze(0).detach().cpu().numpy())
# Don't actually need the target_latent_z, unless we're doing differentiable nearest neighbor transfer.
# Now get the corresponding trajectory.
trajectory = trajectory_set[target_latent_z_index]
# Finally, pick up first state.
start_state = trajectory[0]
return start_state
def differentiable_rollout(self, policy_manager, trajectory_start, latent_z, rollout_length=None):
# Now implementing a differentiable_rollout function that takes in a policy manager.
# Copying over from rollout_robot_trajectory. This function should provide rollout template, but may need modifications for differentiability.
# Remember, the differentiable rollout is required because the backtranslation / cycle-consistency loss needs to be propagated through multiple sets of translations.
# Therefore it must pass through the decoder network(s), and through the latent_z's. (It doesn't actually pass through the states / actions?).
subpolicy_inputs = torch.zeros((1,2*policy_manager.state_dim+policy_manager.latent_z_dimensionality)).to(device).float()
subpolicy_inputs[0,:policy_manager.state_dim] = torch.tensor(trajectory_start).to(device).float()
subpolicy_inputs[:,2*policy_manager.state_dim:] = latent_z
if rollout_length is not None:
length = rollout_length-1
else:
length = policy_manager.rollout_timesteps-1
for t in range(length):
# Get actions from the policy.
actions = policy_manager.policy_network.reparameterized_get_actions(subpolicy_inputs, greedy=True)
# Select last action to execute.
action_to_execute = actions[-1].squeeze(1)
# Downscale the actions by action_scale_factor.
action_to_execute = action_to_execute/self.args.action_scale_factor
# Compute next state.
new_state = subpolicy_inputs[t,:policy_manager.state_dim]+action_to_execute
# New input row.
input_row = torch.zeros((1,2*policy_manager.state_dim+policy_manager.latent_z_dimensionality)).to(device).float()
input_row[0,:policy_manager.state_dim] = new_state
# Feed in the ORIGINAL prediction from the network as input. Not the downscaled thing.
input_row[0,policy_manager.state_dim:2*policy_manager.state_dim] = actions[-1].squeeze(1)
input_row[0,2*policy_manager.state_dim:] = latent_z
# Now that we have assembled the new input row, concatenate it along temporal dimension with previous inputs.
subpolicy_inputs = torch.cat([subpolicy_inputs,input_row],dim=0)
trajectory = subpolicy_inputs[:,:policy_manager.state_dim].detach().cpu().numpy()
differentiable_trajectory = subpolicy_inputs[:,:policy_manager.state_dim]
differentiable_action_seq = subpolicy_inputs[:,policy_manager.state_dim:2*policy_manager.state_dim]
differentiable_state_action_seq = subpolicy_inputs[:,:2*policy_manager.state_dim]
# For differentiability, return tuple of trajectory, actions, state actions, and subpolicy_inputs.
return [differentiable_trajectory, differentiable_action_seq, differentiable_state_action_seq, subpolicy_inputs]
def get_source_target_domain_managers(self):
domain = np.random.binomial(1,0.5)
# Also Get domain policy manager.
source_policy_manager = self.get_domain_manager(domain)
target_policy_manager = self.get_domain_manager(1-domain)
return domain, source_policy_manager, target_policy_manager
def cross_domain_decoding(self, domain, domain_manager, latent_z, start_state=None):
# If start state is none, first get start state, else use the argument.
if start_state is None:
start_state = self.get_start_state(domain, latent_z)
# Now rollout in target domain.
differentiable_trajectory, differentiable_action_seq, differentiable_state_action_seq, subpolicy_inputs = self.differentiable_rollout(domain_manager, start_state, latent_z)
return differentiable_trajectory, subpolicy_inputs
def update_networks(self, domain, dictionary, source_policy_manager):
# Here are the objectives we have to be considering.
# 1) Reconstruction of inputs under single domain encoding / decoding.
# In this implementation, we just have to use the source_loglikelihood for this.
# 2) Discriminability of Z space. This is taken care of from the compute_discriminator_losses function.
# 3) Cycle-consistency. This may be implemented as regression (L2), loglikelihood of cycle-reconstructed traj, or discriminability of trajectories.
# In this implementation, we just have to use the cross domain decoded loglikelihood.
####################################
# First update encoder decoder networks. Don't train discriminator.
####################################
# Zero gradients.
self.optimizer.zero_grad()
####################################
# (1) Compute single-domain reconstruction loss.
####################################
# Compute VAE loss on the current domain as negative log likelihood plus weighted KL.
self.source_likelihood_loss = -dictionary['source_loglikelihood'].mean()
self.source_encoder_KL = dictionary['source_kl_divergence'].mean()
self.source_reconstruction_loss = self.source_likelihood_loss + self.args.kl_weight*self.source_encoder_KL
####################################
# (2) Compute discriminability losses.
####################################
# This block first computes discriminability losses:
# # a) First, feeds the latent_z into the z_discriminator, that is being trained to discriminate between z's of source and target domains.
# # Gets and returns the loglikelihood of the discriminator predicting the true domain.
# # Also returns discriminability loss, that is used to train the _encoders_ of both domains.
# #
# # b) ####### DON'T NEED TO DO THIS YET: ####### Also feeds either the cycle reconstructed trajectory, or the original trajectory from the source domain, into a separate discriminator.
# # This second discriminator is specific to the domain we are operating in. This discriminator is discriminating between the reconstructed and original trajectories.
# # Basically standard GAN adversarial training, except the generative model here is the entire cycle-consistency translation model.
#
# In addition to this, must also compute discriminator losses to train discriminators themselves.
# # a) For the z discriminator (and if we're using trajectory discriminators, those too), clone and detach the inputs of the discriminator and compute a discriminator loss with the right domain used in targets / supervision.
# # This discriminator loss is what is used to actually train the discriminators.
# Get z discriminator logprobabilities.
z_discriminator_logprob, z_discriminator_prob = self.discriminator_network(dictionary['source_latent_z'])
# Compute discriminability loss. Remember, this is not used for training the discriminator, but rather the encoders.
self.z_discriminability_loss = self.negative_log_likelihood_loss_function(z_discriminator_logprob.squeeze(1), torch.tensor(1-domain).to(device).long().view(1,))
###### Block that computes discriminability losses assuming we are using trajectory discriminators. ######
# # Get the right trajectory discriminator network.
# discriminator_list = [self.source_discriminator, self.target_discriminator]
# source_discriminator = discriminator_list[domain]
# # Now feed the trajectory to the trajectory discriminator, based on whether it is the source or target discriminator.
# traj_discriminator_logprob, traj_discriminator_prob = source_discriminator(trajectory)
# # Compute trajectory discriminability loss, based on whether the trajectory was original or reconstructed.
# self.traj_discriminability_loss = self.negative_log_likelihood_loss_function(traj_discriminator_logprob.squeeze(1), torch.tensor(1-original_or_reconstructed).to(device).long().view(1,))
####################################
# (3) Compute cycle-consistency losses.
####################################
# Must compute likelihoods of original actions under the cycle reconstructed trajectory states.
# I.e. evaluate likelihood of original actions under source_decoder (i.e. source subpolicy), with the subpolicy inputs constructed from cycle-reconstruction.
# Get the original action sequence.
original_action_sequence = dictionary['source_subpolicy_inputs_original'][:,self.state_dim:2*self.state_dim]
# Now evaluate likelihood of actions under the source decoder.
cycle_reconstructed_loglikelihood, _ = source_policy_manager.forward(dictionary['source_subpolicy_inputs_crossdomain'], original_action_sequence)
# Reweight the cycle reconstructed likelihood to construct the loss.
self.cycle_reconstruction_loss = -self.args.cycle_reconstruction_loss_weight*cycle_reconstructed_loglikelihood.mean()
####################################
# Now that individual losses are computed, compute total loss, compute gradients, and then step.
####################################
# First combine losses.
self.total_VAE_loss = self.source_reconstruction_loss + self.z_discriminability_loss + self.cycle_reconstruction_loss
# If we are in an encoder / decoder training phase, compute gradients and step.
if not(self.skip_vae):
self.total_VAE_loss.backward()
self.optimizer.step()
####################################
# Now compute discriminator losses and update discriminator network(s).
####################################
# First zero out the discriminator gradients.
self.discriminator_optimizer.zero_grad()
# Detach the latent z that is fed to the discriminator, and then compute discriminator loss.
# If we tried to zero grad the discriminator and then use NLL loss on it again, Pytorch would cry about going backward through a part of the graph that we already
# went backward through. Instead, just pass things through the discriminator again, but this time detaching latent_z.
z_discriminator_detach_logprob, z_discriminator_detach_prob = self.discriminator_network(dictionary['source_latent_z'].detach())
# Compute discriminator loss for discriminator.
self.z_discriminator_loss = self.negative_log_likelihood_loss_function(z_discriminator_detach_logprob.squeeze(1), torch.tensor(domain).to(device).long().view(1,))
if not(self.skip_discriminator):
# Now go backward and take a step.
self.z_discriminator_loss.backward()
self.discriminator_optimizer.step()
def run_iteration(self, counter, i):
# Phases:
# Phase 1: Train encoder-decoder for both domains initially, so that discriminator is not fed garbage.
# Phase 2: Train encoder, decoder for each domain, and Z discriminator concurrently.
# Phase 3: Train encoder, decoder for each domain, and the individual source and target discriminators, concurrently.
# Algorithm (joint training):
# For every epoch:
# # For every datapoint:
# # 1) Select which domain to use as source (i.e. with 50% chance, select either domain).
# # 2) Get trajectory segments from desired domain.
# # 3) Transfer Steps:
# # a) Encode trajectory as latent z (domain 1).
# # b) Use domain 2 decoder to decode latent z into trajectory (domain 2).
# # c) Use domain 2 encoder to encode trajectory into latent z (domain 2).
# # d) Use domain 1 decoder to decode latent z (domain 2) into trajectory (domain 1).
# # 4) Feed cycle-reconstructed trajectory and original trajectory (both domain 1) into discriminator.
# # 5) Train discriminators to predict whether original or cycle reconstructed trajectory.
# # Alternate: Remember, don't actually need to use trajectory level discriminator networks, can just use loglikelihood cycle-reconstruction loss. Try this first.
# # Train z discriminator to predict which domain the latent z sample came from.
# # Train encoder / decoder architectures with mix of reconstruction loss and discriminator confusing objective.
# # Compute and apply gradient updates.
# Remember to make domain agnostic function calls to encode, feed into discriminator, get likelihoods, etc.
####################################
# (0) Setup things like training phases, epsilon values, etc.
####################################
self.set_iteration(counter)
dictionary = {}
target_dict = {}
####################################
# (1) Select which domain to use as source domain (also supervision of z discriminator for this iteration).
####################################
domain, source_policy_manager, target_policy_manager = self.get_source_target_domain_managers()
####################################
# (2) & (3 a) Get source trajectory (segment) and encode into latent z. Decode using source decoder, to get loglikelihood for reconstruction objective.
####################################
dictionary['source_subpolicy_inputs_original'], dictionary['source_latent_z'], dictionary['source_loglikelihood'], dictionary['source_kl_divergence'] = self.encode_decode_trajectory(source_policy_manager, i)
####################################
# (3 b) Cross domain decoding.
####################################
target_dict['target_trajectory_rollout'], target_dict['target_subpolicy_inputs'] = self.cross_domain_decoding(domain, target_policy_manager, dictionary['source_latent_z'])
####################################
# (3 c) Cross domain encoding of target_trajectory_rollout into target latent_z.
####################################
dictionary['target_subpolicy_inputs'], dictionary['target_latent_z'], dictionary['target_loglikelihood'], dictionary['target_kl_divergence'] = self.encode_decode_trajectory(target_policy_manager, i, trajectory_input=target_dict)
####################################
# (3 d) Cross domain decoding of target_latent_z into source trajectory.
# Can use the original start state, or also use the reverse trick for start state. Try both maybe.
####################################
source_trajectory_rollout, dictionary['source_subpolicy_inputs_crossdomain'] = self.cross_domain_decoding(domain, source_policy_manager, dictionary['target_latent_z'], start_state=dictionary['source_subpolicy_inputs_original'][0,:self.state_dim].detach().cpu().numpy())
####################################
# (4) Feed source and target latent z's to z_discriminator.
####################################
self.compute_discriminator_losses(domain, dictionary['source_latent_z'])
####################################
# (5) Compute all losses, reweight, and take gradient steps.
####################################
self.update_networks(domain, dictionary, source_policy_manager)
# viz_dict = {'domain': domain, 'discriminator_probs': discriminator_prob.squeeze(0).squeeze(0)[domain].detach().cpu().numpy()}
# self.update_plots(counter, viz_dict)
# Encode decode function: First encodes, takes trajectory segment, and outputs latent z. The latent z is then provided to decoder (along with initial state), and then we get SOURCE domain subpolicy inputs.
# Cross domain decoding function: Takes encoded latent z (and start state), and then rolls out with target decoder. Function returns, target trajectory, action sequence, and TARGET domain subpolicy inputs.
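# Hypothetical driver sketch for the transfer managers above (argument plumbing mirrors the other
# policy managers; setup() and train() are assumed to come from PolicyManager_BaseClass):
# 	policy_manager = PolicyManager_Transfer(args=args, source_dataset=src, target_dataset=tgt)
# 	policy_manager.setup()
# 	policy_manager.train()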
| CausalSkillLearning-main | Experiments/PolicyManagers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
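# Illustrative use of resample: np.linspace spans [0, len-1] inclusive, so the first and last
# timesteps of the original trajectory are always kept.
# 	downsampled = resample(np.random.randn(100, 7), 20)		# -> shape (20, 7)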
class Transition():
def __init__(self, state, action, next_state, onestep_reward, terminal, success):
# Now that we're doing 1-step TD and AC architectures rather than MC,
# we don't need an explicit value of the return.
self.state = state
self.action = action
self.next_state = next_state
self.onestep_reward = onestep_reward
self.terminal = terminal
self.success = success
class Episode_TransitionList():
def __init__(self, transition_list):
self.episode = transition_list
def length(self):
return len(self.episode)
# Alternate way of implementing an episode...
# Make it a class that has state_list, action_list, etc. over the episode..
class Episode():
def __init__(self, state_list=None, action_list=None, reward_list=None, terminal_list=None):
self.state_list = state_list
self.action_list = action_list
self.reward_list = reward_list
self.terminal_list = terminal_list
self.episode_length = len(self.state_list)
def length(self):
return self.episode_length
class HierarchicalEpisode(Episode):
def __init__(self, state_list=None, action_list=None, reward_list=None, terminal_list=None, latent_z_list=None, latent_b_list=None):
super(HierarchicalEpisode, self).__init__(state_list, action_list, reward_list, terminal_list)
self.latent_z_list = latent_z_list
self.latent_b_list = latent_b_list
class ReplayMemory():
def __init__(self, memory_size=10000):
# Implementing the memory as a list of EPISODES.
# This acts as a queue.
self.memory = []
# Accessing the memory with indices should be constant time, so it's okay to use a list.
# Not using a priority either.
self.memory_len = 0
self.memory_size = memory_size
print("Setup Memory.")
def append_to_memory(self, episode):
if self.check_full():
# Remove first episode in the memory (queue).
self.memory.pop(0)
# Now push the episode to the end of the queue.
self.memory.append(episode)
else:
self.memory.append(episode)
self.memory_len+=1
def sample_batch(self, batch_size=25):
self.memory_len = len(self.memory)
indices = np.random.randint(0,high=self.memory_len,size=(batch_size))
return indices
def retrieve_batch(self, batch_size=25):
# self.memory_len = len(self.memory)
return np.arange(0,batch_size)
def check_full(self):
self.memory_len = len(self.memory)
if self.memory_len<self.memory_size:
return 0
else:
return 1
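# Illustrative replay memory usage (the episode construction below is a placeholder):
# 	memory = ReplayMemory(memory_size=1000)
# 	memory.append_to_memory(Episode(state_list=s, action_list=a, reward_list=r, terminal_list=t))
# 	indices = memory.sample_batch(batch_size=25)		# indices into memory.memory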
# Refer: https://towardsdatascience.com/deep-deterministic-policy-gradients-explained-2d94655a9b7b
"""
Taken from https://github.com/vitchyr/rlkit/blob/master/rlkit/exploration_strategies/ou_strategy.py
"""
class OUNoise(object):
def __init__(self, action_space_size, mu=0.0, theta=0.15, max_sigma=0.2, min_sigma=0.2, decay_period=100000):
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self.max_sigma = max_sigma
self.min_sigma = min_sigma
self.decay_period = decay_period
self.action_dim = action_space_size
self.low = -np.ones((self.action_dim))
self.high = np.ones((self.action_dim))
self.reset()
def reset(self):
self.state = np.ones(self.action_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(self.action_dim)
self.state = x + dx
return self.state
def get_action(self, action, t=0):
ou_state = self.evolve_state()
self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period)
return np.clip(action + ou_state, self.low, self.high) | CausalSkillLearning-main | Experiments/RLUtils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
import tensorflow as tf
import numpy as np
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
import tempfile
import moviepy.editor as mpy
import os
import os.path as osp
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import summary_op_util
def py_encode_gif(im_thwc, tag, fps=4):
"""
Given a 4D numpy tensor of images, encodes as a gif.
"""
with tempfile.NamedTemporaryFile() as f: fname = f.name + '.gif'
clip = mpy.ImageSequenceClip(list(im_thwc), fps=fps)
clip.write_gif(fname, verbose=False, logger=None)
with open(fname, 'rb') as f: enc_gif = f.read()
os.remove(fname)
# create a tensorflow image summary protobuf:
thwc = im_thwc.shape
im_summ = tf.Summary.Image()
im_summ.height = thwc[1]
im_summ.width = thwc[2]
im_summ.colorspace = 3 # fix to 3 == RGB
im_summ.encoded_image_string = enc_gif
return im_summ
# create a summary obj:
#summ = tf.Summary()
#summ.value.add(tag=tag, image=im_summ)
#summ_str = summ.SerializeToString()
#return summ_str
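# Illustrative Logger usage (the log directory here is a placeholder):
# 	logger = Logger("logs/run_0")
# 	logger.scalar_summary("loss", 0.5, step=1)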
class Logger(object):
def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
# Note: the proto-based summary methods below (tf.Summary / add_summary) require the TF1-style
# writer, so keep the FileWriter here rather than tf.summary.create_file_writer.
self.writer = tf.summary.FileWriter(log_dir)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def gif_summary(self, tag, images, step):
"""Log a list of TXHXWX3 images."""
# from https://github.com/tensorflow/tensorboard/issues/39
img_summaries = []
for i, img in enumerate(images):
# Create a Summary value
img_sum = py_encode_gif(img, '%s/%d' % (tag, i), fps=4)
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
"""Log a histogram of the tensor of values."""
# Create a histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush() | CausalSkillLearning-main | Experiments/TFLogger.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class TestLoaderWithKwargs(unittest.TestLoader):
"""A test loader which allows to parse keyword arguments to the
test case class."""
# def loadTestsFromTestCase(self, testCaseClass, **kwargs):
def loadTestsFromTestCase(self, testCaseClass, policy_manager):
"""Return a suite of all tests cases contained in
testCaseClass."""
if issubclass(testCaseClass, unittest.suite.TestSuite):
raise TypeError("Test cases should not be derived from "\
"TestSuite. Maybe you meant to derive from"\
" TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
# Modification here: parse keyword arguments to testCaseClass.
test_cases = []
# embed()
for test_case_name in testCaseNames:
# test_cases.append(testCaseClass(policy_manager))
test_cases.append(testCaseClass(test_case_name, policy_manager))
loaded_suite = self.suiteClass(test_cases)
return loaded_suite
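# Illustrative usage of the loader (policy_manager is assumed to be an already-constructed
# policy manager instance):
# 	loader = TestLoaderWithKwargs()
# 	suite = loader.loadTestsFromTestCase(MetaTestClass, policy_manager)
# 	unittest.TextTestRunner().run(suite)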
class MetaTestClass(unittest.TestCase):
def __init__(self, test_name, policy_manager):
super(MetaTestClass, self).__init__(test_name)
self.policy_manager = policy_manager
self.args = self.policy_manager.args
self.dataset = self.policy_manager.dataset
def test_dataloader(self):
if self.args.data=='Roboturk':
self.check_Roboturkdataloader()
if self.args.data=='MIME':
self.check_MIMEdataloader()
def check_MIMEdataloader(self):
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']==1
check_demo_data = (data_element['demo']==np.load("Test_Data/MIME_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
def check_Roboturkdataloader(self):
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']
check_demo_data = (data_element['demo']==np.load("Test_Data/Roboturk_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
def test_variational_policy(self):
if self.args.setting=='learntsub':
# Assume the variational policy is an instance of ContinuousVariationalPolicyNetwork_BPrior class.
inputs = torch.ones((40,self.policy_manager.variational_policy.input_size)).cuda().float()
expected_outputs = np.load("Test_Data/{0}_Varpolicy_Res.npy".format(self.args.data),allow_pickle=True)
pred_outputs = self.policy_manager.variational_policy.forward(inputs, epsilon=0.)
error = (((expected_outputs[0]-pred_outputs[0])**2).mean()).detach().cpu().numpy()
threshold = 0.01
self.assertTrue(error < threshold)
else:
pass
def test_subpolicy(self):
# Assume the subpolicy is an instance of ContinuousPolicyNetwork class.
inputs = torch.ones((15,self.policy_manager.policy_network.input_size)).cuda().float()
actions = np.ones((15,self.policy_manager.policy_network.output_size))
expected_outputs = np.load("Test_Data/{0}_Subpolicy_Res.npy".format(self.args.data),allow_pickle=True)
pred_outputs = self.policy_manager.policy_network.forward(inputs, actions)
error = (((expected_outputs[0]-pred_outputs[0])**2).mean()).detach().cpu().numpy()
threshold = 0.01
self.assertTrue(error < threshold)
def test_latent_policy(self):
# Assume the latent policy is a ContinuousLatentPolicyNetwork class.
pass
def test_encoder_policy(self):
# Assume is instance of ContinuousEncoderNetwork class.
pass | CausalSkillLearning-main | Experiments/TestClass.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from headers import *
import os.path as osp
def select_baxter_angles(trajectory, joint_names, arm='right'):
# joint names in order as used via mujoco visualizer
baxter_joint_names = ['right_s0', 'right_s1', 'right_e0', 'right_e1', 'right_w0', 'right_w1', 'right_w2', 'left_s0', 'left_s1', 'left_e0', 'left_e1', 'left_w0', 'left_w1', 'left_w2']
if arm == 'right':
select_joints = baxter_joint_names[:7]
elif arm == 'left':
select_joints = baxter_joint_names[7:]
elif arm == 'both':
select_joints = baxter_joint_names
inds = [joint_names.index(j) for j in select_joints]
return trajectory[:, inds]
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class MIME_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, split='all'):
self.dataset_directory = '/checkpoint/tanmayshankar/MIME/'
self.ds_freq = 20
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
self.filelist = glob.glob(self.fulltext)
with open(self.filelist[0], 'r') as file:
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
if split == 'all':
self.filelist = self.filelist
else:
self.task_lists = np.load(os.path.join(
self.dataset_directory, 'MIME_jointangles/{}_Lists.npy'.format(split.capitalize())), allow_pickle=True)
self.filelist = []
for i in range(20):
self.filelist.extend(self.task_lists[i])
self.filelist = [f.replace('/checkpoint/tanmayshankar/MIME/', self.dataset_directory) for f in self.filelist]
# print(len(self.filelist))
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)/100
elem['right_gripper'] = resample(right_gripper, n_samples)/100
elem['path_prefix'] = os.path.split(self.filelist[index])[0]
elem['ra_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='right')
elem['la_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='left')
# If max norm of differences is <1.0, valid.
# if elem['joint_angle_trajectory'].shape[0]>1:
elem['is_valid'] = int(np.linalg.norm(np.diff(elem['joint_angle_trajectory'],axis=0),axis=1).max() < 1.0)
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
class MIME_NewDataset(Dataset):
def __init__(self, split='all'):
self.dataset_directory = '/checkpoint/tanmayshankar/MIME/'
# Load the entire set of trajectories.
self.data_list = np.load(os.path.join(self.dataset_directory, "Data_List.npy"),allow_pickle=True)
self.dataset_length = len(self.data_list)
def __len__(self):
# Return length of file list.
return self.dataset_length
def __getitem__(self, index):
# Return n'th item of dataset.
# This has already processed everything.
return self.data_list[index]
def compute_statistics(self):
self.state_size = 16
self.total_length = self.__len__()
mean = np.zeros((self.state_size))
variance = np.zeros((self.state_size))
mins = np.zeros((self.total_length, self.state_size))
maxs = np.zeros((self.total_length, self.state_size))
lens = np.zeros((self.total_length))
# And velocity statistics.
vel_mean = np.zeros((self.state_size))
vel_variance = np.zeros((self.state_size))
vel_mins = np.zeros((self.total_length, self.state_size))
vel_maxs = np.zeros((self.total_length, self.state_size))
for i in range(self.total_length):
print("Phase 1: DP: ",i)
data_element = self.__getitem__(i)
if data_element['is_valid']:
demo = data_element['demo']
vel = np.diff(demo,axis=0)
mins[i] = demo.min(axis=0)
maxs[i] = demo.max(axis=0)
mean += demo.sum(axis=0)
lens[i] = demo.shape[0]
vel_mins[i] = abs(vel).min(axis=0)
vel_maxs[i] = abs(vel).max(axis=0)
vel_mean += vel.sum(axis=0)
mean /= lens.sum()
vel_mean /= lens.sum()
for i in range(self.total_length):
print("Phase 2: DP: ",i)
data_element = self.__getitem__(i)
# Just need to normalize the demonstration. Not the rest.
if data_element['is_valid']:
demo = data_element['demo']
vel = np.diff(demo,axis=0)
variance += ((demo-mean)**2).sum(axis=0)
vel_variance += ((vel-vel_mean)**2).sum(axis=0)
variance /= lens.sum()
variance = np.sqrt(variance)
vel_variance /= lens.sum()
vel_variance = np.sqrt(vel_variance)
max_value = maxs.max(axis=0)
min_value = mins.min(axis=0)
vel_max_value = vel_maxs.max(axis=0)
vel_min_value = vel_mins.min(axis=0)
np.save("MIME_Orig_Mean.npy", mean)
np.save("MIME_Orig_Var.npy", variance)
np.save("MIME_Orig_Min.npy", min_value)
np.save("MIME_Orig_Max.npy", max_value)
np.save("MIME_Orig_Vel_Mean.npy", vel_mean)
np.save("MIME_Orig_Vel_Var.npy", vel_variance)
np.save("MIME_Orig_Vel_Min.npy", vel_min_value)
np.save("MIME_Orig_Vel_Max.npy", vel_max_value)
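# Minimal sketch of the two-pass statistics scheme used in compute_statistics above: pass 1
# accumulates sums over all valid timesteps, pass 2 accumulates squared deviations from the
# resulting mean. The toy input and the helper name are illustrative only.
def _example_two_pass_statistics(trajectory_list):
    # trajectory_list: list of (T_i x D) arrays, possibly with different lengths T_i.
    state_size = trajectory_list[0].shape[1]
    mean = np.zeros(state_size)
    total_timesteps = 0
    for demo in trajectory_list:
        mean += demo.sum(axis=0)
        total_timesteps += demo.shape[0]
    mean /= total_timesteps
    variance = np.zeros(state_size)
    for demo in trajectory_list:
        variance += ((demo - mean)**2).sum(axis=0)
    variance /= total_timesteps
    # Like the method above, the square root (i.e. the standard deviation) is what gets stored.
    return mean, np.sqrt(variance)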
class MIME_Dataloader_Tester(unittest.TestCase):
def test_MIMEdataloader(self):
self.dataset = MIME_NewDataset()
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']==1
check_demo_data = (data_element['demo']==np.load("Test_Data/MIME_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
if __name__ == '__main__':
# Run all tests defined for the dataloader.
unittest.main() | CausalSkillLearning-main | Experiments/MIME_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags, app
import copy, os, imageio, scipy.misc, pdb, math, time, numpy as np
import robosuite, threading
from robosuite.wrappers import IKWrapper
import matplotlib.pyplot as plt
from IPython import embed
# # Mocap viz.
# import MocapVisualizationUtils
# from mocap_processing.motion.pfnn import Animation, BVH
class SawyerVisualizer():
def __init__(self, has_display=False):
# Create environment.
print("Do I have a display?", has_display)
# self.base_env = robosuite.make('BaxterLift', has_renderer=has_display)
self.base_env = robosuite.make("SawyerViz",has_renderer=has_display)
# Create kinematics object.
self.sawyer_IK_object = IKWrapper(self.base_env)
self.environment = self.sawyer_IK_object.env
def update_state(self):
# Updates all joint states
self.full_state = self.environment._get_observation()
def set_joint_pose_return_image(self, joint_angles, arm='both', gripper=False):
# In the Roboturk dataset, we have the following joint angles:
# ('time','right_j0', 'head_pan', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6', 'r_gripper_l_finger_joint', 'r_gripper_r_finger_joint')
# Set usual joint angles through set joint positions API.
self.environment.reset()
self.environment.set_robot_joint_positions(joint_angles[:7])
# For gripper, use "step".
# Mujoco requires actions that are -1 for Open and 1 for Close.
# [l,r]
# gripper_open = [0.0115, -0.0115]
# gripper_closed = [-0.020833, 0.020833]
# In mujoco, -1 is open, and 1 is closed.
actions = np.zeros((8))
actions[-1] = joint_angles[-1]
# Move gripper positions.
self.environment.step(actions)
image = np.flipud(self.environment.sim.render(600, 600, camera_name='vizview1'))
return image
def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
image_list = []
for t in range(trajectory.shape[0]):
new_image = self.set_joint_pose_return_image(trajectory[t])
image_list.append(new_image)
# Insert a white frame at segmentation boundaries.
if segmentations is not None:
if t>0 and segmentations[t]==1:
image_list.append(255*np.ones_like(new_image)+new_image)
if return_and_save:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
return image_list
elif return_gif:
return image_list
else:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
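# Hedged usage sketch for SawyerVisualizer above. It assumes the robosuite setup used by the
# class (including the "SawyerViz" environment and an offscreen renderer) is available; the
# trajectory, output path, and helper name are placeholders for illustration only.
def _example_sawyer_visualization(gif_path='.', gif_name='Sawyer_Example.gif'):
    visualizer = SawyerVisualizer(has_display=False)
    # 20 timesteps of 7 joint angles plus one gripper value in [-1, 1],
    # matching the layout expected by set_joint_pose_return_image.
    trajectory = np.zeros((20, 8))
    visualizer.visualize_joint_trajectory(trajectory, gif_path=gif_path, gif_name=gif_name)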
class BaxterVisualizer():
def __init__(self, has_display=False):
# Create environment.
print("Do I have a display?", has_display)
# self.base_env = robosuite.make('BaxterLift', has_renderer=has_display)
self.base_env = robosuite.make("BaxterViz",has_renderer=has_display)
# Create kinematics object.
self.baxter_IK_object = IKWrapper(self.base_env)
self.environment = self.baxter_IK_object.env
def update_state(self):
# Updates all joint states
self.full_state = self.environment._get_observation()
def set_ee_pose_return_image(self, ee_pose, arm='right', seed=None):
# Assumes EE pose is Position in the first three elements, and quaternion in last 4 elements.
self.update_state()
if seed is None:
# Set seed to current state.
seed = self.full_state['joint_pos']
if arm == 'right':
joint_positions = self.baxter_IK_object.controller.inverse_kinematics(
target_position_right=ee_pose[:3],
target_orientation_right=ee_pose[3:],
target_position_left=self.full_state['left_eef_pos'],
target_orientation_left=self.full_state['left_eef_quat'],
rest_poses=seed
)
elif arm == 'left':
joint_positions = self.baxter_IK_object.controller.inverse_kinematics(
target_position_right=self.full_state['right_eef_pos'],
target_orientation_right=self.full_state['right_eef_quat'],
target_position_left=ee_pose[:3],
target_orientation_left=ee_pose[3:],
rest_poses=seed
)
elif arm == 'both':
joint_positions = self.baxter_IK_object.controller.inverse_kinematics(
target_position_right=ee_pose[:3],
target_orientation_right=ee_pose[3:7],
target_position_left=ee_pose[7:10],
target_orientation_left=ee_pose[10:],
rest_poses=seed
)
image = self.set_joint_pose_return_image(joint_positions, arm=arm, gripper=False)
return image
def set_joint_pose_return_image(self, joint_pose, arm='both', gripper=False):
# FOR FULL 16 DOF STATE: ASSUMES JOINT_POSE IS <LEFT_JA, RIGHT_JA, LEFT_GRIPPER, RIGHT_GRIPPER>.
self.update_state()
self.state = copy.deepcopy(self.full_state['joint_pos'])
# THE FIRST 7 JOINT ANGLES IN MUJOCO ARE THE RIGHT HAND.
# THE LAST 7 JOINT ANGLES IN MUJOCO ARE THE LEFT HAND.
if arm=='right':
# Assume joint_pose is 8 DoF - 7 for the arm, and 1 for the gripper.
self.state[:7] = copy.deepcopy(joint_pose[:7])
elif arm=='left':
# Assume joint_pose is 8 DoF - 7 for the arm, and 1 for the gripper.
self.state[7:] = copy.deepcopy(joint_pose[:7])
elif arm=='both':
# The Plans were generated as: Left arm, Right arm, left gripper, right gripper.
# Assume joint_pose is 16 DoF. 7 DoF for left arm, 7 DoF for right arm. (These need to be flipped)., 1 for left gripper. 1 for right gripper.
# First right hand.
self.state[:7] = joint_pose[7:14]
# Now left hand.
self.state[7:] = joint_pose[:7]
# Set the joint angles magically.
self.environment.set_robot_joint_positions(self.state)
action = np.zeros((16))
if gripper:
# Left gripper is 15. Right gripper is 14.
# MIME Gripper values are from 0 to 100 (Close to Open), but we treat the inputs to this function as 0 to 1 (Close to Open), and then rescale to (-1 Open to 1 Close) for Mujoco.
if arm=='right':
action[14] = -joint_pose[-1]*2+1
elif arm=='left':
action[15] = -joint_pose[-1]*2+1
elif arm=='both':
action[14] = -joint_pose[15]*2+1
action[15] = -joint_pose[14]*2+1
# Move gripper positions.
self.environment.step(action)
image = np.flipud(self.environment.sim.render(600, 600, camera_name='vizview1'))
return image
def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
image_list = []
for t in range(trajectory.shape[0]):
new_image = self.set_joint_pose_return_image(trajectory[t])
image_list.append(new_image)
# Insert a white frame at segmentation boundaries.
if segmentations is not None:
if t>0 and segmentations[t]==1:
image_list.append(255*np.ones_like(new_image)+new_image)
if return_and_save:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
return image_list
elif return_gif:
return image_list
else:
imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
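# Small sketch of the gripper rescaling used in BaxterVisualizer.set_joint_pose_return_image above:
# a MIME gripper value g in [0, 1] (close to open) is mapped to the Mujoco action -2*g + 1, so
# g=0 gives +1 (closed) and g=1 gives -1 (open). The helper name is illustrative only.
def _example_gripper_to_mujoco_action(gripper_value):
    mujoco_action = -gripper_value*2 + 1
    return mujoco_action
# For instance, a half-open gripper maps to a zero gripper action:
# _example_gripper_to_mujoco_action(0.5) == 0.0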
# class MocapVisualizer():
# def __init__(self, has_display=False, args=None):
# # Load some things from the MocapVisualizationUtils and set things up so that they're ready to go.
# # self.cam_cur = MocapVisualizationUtils.camera.Camera(pos=np.array([6.0, 0.0, 2.0]),
# # origin=np.array([0.0, 0.0, 0.0]),
# # vup=np.array([0.0, 0.0, 1.0]),
# # fov=45.0)
# self.args = args
# # Default is local data.
# self.global_data = False
# self.cam_cur = MocapVisualizationUtils.camera.Camera(pos=np.array([4.5, 0.0, 2.0]),
# origin=np.array([0.0, 0.0, 0.0]),
# vup=np.array([0.0, 0.0, 1.0]),
# fov=45.0)
# # Path to dummy file that is going to populate joint_parents, initial global positions, etc.
# bvh_filename = "/private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments/01_01_poses.bvh"
# # Run init before loading animation.
# MocapVisualizationUtils.init()
# MocapVisualizationUtils.global_positions, MocapVisualizationUtils.joint_parents, MocapVisualizationUtils.time_per_frame = MocapVisualizationUtils.load_animation(bvh_filename)
# # State sizes.
# self.number_joints = 22
# self.number_dimensions = 3
# self.total_dimensions = self.number_joints*self.number_dimensions
# # Run thread of viewer, so that callbacks start running.
# thread = threading.Thread(target=self.run_thread)
# thread.start()
# # Also create dummy animation object.
# self.animation_object, _, _ = BVH.load(bvh_filename)
# def run_thread(self):
# MocapVisualizationUtils.viewer.run(
# title='BVH viewer',
# cam=self.cam_cur,
# size=(1280, 720),
# keyboard_callback=None,
# render_callback=MocapVisualizationUtils.render_callback_time_independent,
# idle_callback=MocapVisualizationUtils.idle_callback_return,
# )
# def get_global_positions(self, positions, animation_object=None):
# # Function to get global positions corresponding to predicted or actual local positions.
# traj_len = positions.shape[0]
# def resample(original_trajectory, desired_number_timepoints):
# original_traj_len = len(original_trajectory)
# new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
# return original_trajectory[new_timepoints]
# if animation_object is not None:
# # Now copy over from animation_object instead of just dummy animation object.
# new_animation_object = Animation.Animation(resample(animation_object.rotations, traj_len), positions, animation_object.orients, animation_object.offsets, animation_object.parents)
# else:
# # Create a dummy animation object.
# new_animation_object = Animation.Animation(self.animation_object.rotations[:traj_len], positions, self.animation_object.orients, self.animation_object.offsets, self.animation_object.parents)
# # Then transform them.
# transformed_global_positions = Animation.positions_global(new_animation_object)
# # Now return coordinates.
# return transformed_global_positions
# def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
# image_list = []
# if self.global_data:
# # If we predicted in the global setting, just reshape.
# predicted_global_positions = np.reshape(trajectory, (-1,self.number_joints,self.number_dimensions))
# else:
# # If it's local data, then transform to global.
# # Assume trajectory is number of timesteps x number_dimensions.
# # Convert to number_of_timesteps x number_of_joints x 3.
# predicted_local_positions = np.reshape(trajectory, (-1,self.number_joints,self.number_dimensions))
# # Assume trajectory was predicted in local coordinates. Transform to global for visualization.
# predicted_global_positions = self.get_global_positions(predicted_local_positions, animation_object=additional_info)
# # Copy into the global variable.
# MocapVisualizationUtils.global_positions = predicted_global_positions
# # Reset Image List.
# MocapVisualizationUtils.image_list = []
# # Set save_path and prefix.
# MocapVisualizationUtils.save_path = gif_path
# MocapVisualizationUtils.name_prefix = gif_name.rstrip('.gif')
# # Now set the whether_to_render as true.
# MocapVisualizationUtils.whether_to_render = True
# # Wait till rendering is complete.
# x_count = 0
# while MocapVisualizationUtils.done_with_render==False and MocapVisualizationUtils.whether_to_render==True:
# x_count += 1
# time.sleep(1)
# # Now that rendering is complete, load images.
# image_list = MocapVisualizationUtils.image_list
# # Now actually save the GIF or return.
# if return_and_save:
# imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
# return image_list
# elif return_gif:
# return image_list
# else:
# imageio.mimsave(os.path.join(gif_path,gif_name), image_list)
class ToyDataVisualizer():
def __init__(self):
pass
def visualize_joint_trajectory(self, trajectory, return_gif=False, gif_path=None, gif_name="Traj.gif", segmentations=None, return_and_save=False, additional_info=None):
fig = plt.figure()
ax = fig.gca()
ax.scatter(trajectory[:,0],trajectory[:,1],c=range(len(trajectory)),cmap='jet')
plt.xlim(-10,10)
plt.ylim(-10,10)
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(int(height), int(width), 3)
image = np.transpose(image, axes=[2,0,1])
return image
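# Hedged usage sketch for ToyDataVisualizer above: render a toy 2-D trajectory to a channels-first
# (3 x H x W) uint8 array. It assumes a matplotlib backend that supports the canvas-to-string
# conversion used in the method above; the spiral trajectory and helper name are placeholders.
def _example_toy_visualization():
    t = np.linspace(0, 4*np.pi, 100)
    trajectory = np.stack([t*np.cos(t), t*np.sin(t)], axis=1)/2.
    visualizer = ToyDataVisualizer()
    image = visualizer.visualize_joint_trajectory(trajectory)
    return image.shape  # (3, height, width), as returned by the method above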
if __name__ == '__main__':
# end_eff_pose = [0.3, -0.3, 0.09798524029948213, 0.38044099037703677, 0.9228975092885654, -0.021717379118030174, 0.05525572942370394]
# end_eff_pose = [0.53303758, -0.59997265, 0.09359371, 0.77337391, 0.34998901, 0.46797516, -0.24576358]
# end_eff_pose = np.array([0.64, -0.83, 0.09798524029948213, 0.38044099037703677, 0.9228975092885654, -0.021717379118030174, 0.05525572942370394])
# Note: no MujocoVisualizer class is defined in this file; BaxterVisualizer provides the
# set_ee_pose_return_image API used by the commented-out lines below.
visualizer = BaxterVisualizer()
# img = visualizer.set_ee_pose_return_image(end_eff_pose, arm='right')
# scipy.misc.imsave('mj_vis.png', img)
| CausalSkillLearning-main | Experiments/Visualizers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from headers import *
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class Roboturk_Dataset(Dataset):
# LINK TO DATASET and INFO: http://roboturk.stanford.edu/dataset.html
# Class implementing instance of Roboturk dataset.
def __init__(self, args):
self.dataset_directory = '/checkpoint/tanmayshankar/Roboturk/RoboTurkPilot'
self.args = args
# Require a task list.
# The task name is needed for setting the environment, rendering.
# The environment itself is only needed for rendering, not for training.
self.task_list = ["bins-Bread", "bins-Can", "bins-Cereal", "bins-full", "bins-Milk", "pegs-full", "pegs-RoundNut", "pegs-SquareNut"]
self.num_demos = np.array([1069, 1069, 1069, 1069, 1069, 1145, 1144, 1145])
self.cummulative_num_demos = self.num_demos.cumsum()
self.cummulative_num_demos = np.insert(self.cummulative_num_demos,0,0)
# Note on indexing: a 0 is inserted at the start of cummulative_num_demos, so that
# np.searchsorted(self.cummulative_num_demos, index, side='right')-1 maps a global dataset index
# to the task it belongs to, and subtracting the corresponding cumulative count recovers the
# demo index within that task. The side='right' option keeps boundary indices consistent.
# (An illustrative index-mapping sketch follows this class definition.)
self.total_length = self.num_demos.sum()
# Seems to follow joint angles order:
# ('time','right_j0', 'head_pan', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6', 'r_gripper_l_finger_joint', 'r_gripper_r_finger_joint', 'Milk0', 'Bread0', 'Cereal0', 'Can0').
# Extract the arm joint angles and gripper values using the index lists below.
self.joint_angle_indices = [1,3,4,5,6,7,8]
self.gripper_indices = [9,10]
self.ds_freq = 20
# self.r_gripper_r_finger_joint = np.array([-0.0116, 0.020833])
# self.r_gripper_l_finger_joint = np.array([-0.020833, 0.0135])
# [l,r]
# gripper_open = [0.0115, -0.0115]
# gripper_closed = [-0.020833, 0.020833]
# Set files.
self.setup()
def setup(self):
# Load data from all tasks.
self.files = []
for i in range(len(self.task_list)):
self.files.append(h5py.File("{0}/{1}/demo.hdf5".format(self.dataset_directory,self.task_list[i]),'r'))
def __len__(self):
return self.total_length
def __getitem__(self, index):
if index>=self.total_length:
print("Out of bounds of dataset.")
return None
# Get bucket that index falls into based on num_demos array.
task_index = np.searchsorted(self.cummulative_num_demos, index, side='right')-1
if index==self.total_length-1:
task_index-=1
# Decide task ID, and compute the demo index within that task by subtracting the cumulative
# number of demonstrations of preceding tasks (+1 because demos are 1-indexed in the hdf5 files).
new_index = index-self.cummulative_num_demos[max(task_index,0)]+1
try:
# Get raw state sequence.
state_sequence = self.files[task_index]['data/demo_{0}/states'.format(new_index)].value
except:
# If this failed, return invalid.
data_element = {}
data_element['is_valid'] = False
return data_element
# Performing another check that makes sure data element actually has states.
if state_sequence.shape[0]==0:
data_element = {}
data_element['is_valid'] = False
return data_element
# If we are here, the data element is presumably valid till now.
# Get joint angles from this state sequence.
joint_values = state_sequence[:,self.joint_angle_indices]
# Get gripper values from state sequence.
gripper_finger_values = state_sequence[:,self.gripper_indices]
# Normalize gripper values.
# 1 is right finger. 0 is left finger.
# 1-0 is right-left.
gripper_values = gripper_finger_values[:,1]-gripper_finger_values[:,0]
gripper_values = (gripper_values-gripper_values.min()) / (gripper_values.max()-gripper_values.min())
gripper_values = 2*gripper_values-1
concatenated_demonstration = np.concatenate([joint_values,gripper_values.reshape((-1,1))],axis=1)
downsampled_demonstration = resample(concatenated_demonstration, concatenated_demonstration.shape[0]//self.ds_freq)
# Performing another check that makes sure data element actually has states.
if downsampled_demonstration.shape[0]==0:
data_element = {}
data_element['is_valid'] = False
return data_element
data_element = {}
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(downsampled_demonstration,self.args.smoothing_kernel_bandwidth,axis=0,mode='nearest')
else:
data_element['demo'] = downsampled_demonstration
# Trivially setting is_valid to true until we come up with a better strategy.
data_element['is_valid'] = True
return data_element
def close(self):
for file in self.files:
file.close()
def preprocess_dataset(self):
# for task_index in range(len(self.task_list)):
# for task_index in [3,5]:
for task_index in [0,1,2,4,6,7]:
print("#######################################")
print("Preprocessing task index: ", task_index)
print("#######################################")
# Get the name of environment.
environment_name = self.files[task_index]['data'].attrs['env']
# Create an actual robo-suite environment.
self.env = robosuite.make(environment_name)
# Get sizes.
obs = self.env._get_observation()
robot_state_size = obs['robot-state'].shape[0]
object_state_size = obs['object-state'].shape[0]
# Create list of files for this task.
task_demo_list = []
# For every element in the filelist of the element,
for i in range(1,self.num_demos[task_index]+1):
print("Preprocessing task index: ", task_index, " Demo Index: ", i, " of: ", self.num_demos[task_index])
# Create list of datapoints for this demonstrations.
datapoint = {}
# Get SEQUENCE of flattened states.
try:
flattened_state_sequence = self.files[task_index]['data/demo_{0}/states'.format(i)].value
joint_action_sequence = self.files[task_index]['data/demo_{0}/joint_velocities'.format(i)].value
gripper_action_sequence = self.files[task_index]['data/demo_{0}/gripper_actuations'.format(i)].value
flattened_state_sequence = resample(flattened_state_sequence, flattened_state_sequence.shape[0]//self.ds_freq)
number_timesteps = flattened_state_sequence.shape[0]
robot_state_array = np.zeros((number_timesteps, robot_state_size))
object_state_array = np.zeros((number_timesteps, object_state_size))
# Get joint angle values from the flattened state sequence.
joint_values = flattened_state_sequence[:,self.joint_angle_indices]
# Get gripper values from state sequence.
gripper_finger_values = flattened_state_sequence[:,self.gripper_indices]
# Normalize gripper values.
# 1 is right finger. 0 is left finger.
# 1-0 is right-left.
gripper_values = gripper_finger_values[:,1]-gripper_finger_values[:,0]
gripper_values = (gripper_values-gripper_values.min()) / (gripper_values.max()-gripper_values.min())
gripper_values = 2*gripper_values-1
concatenated_demonstration = np.concatenate([joint_values,gripper_values.reshape((-1,1))],axis=1)
concatenated_actions = np.concatenate([joint_action_sequence,gripper_action_sequence.reshape((-1,1))],axis=1)
# For every element in sequence, set environment state.
for t in range(flattened_state_sequence.shape[0]):
self.env.sim.set_state_from_flattened(flattened_state_sequence[t])
# Now get observation.
observation = self.env._get_observation()
# Robot and Object state appended to datapoint dictionary.
robot_state_array[t] = observation['robot-state']
object_state_array[t] = observation['object-state']
except:
datapoint['robot_state_array'] = np.zeros((1, robot_state_size))
datapoint['object_state_array'] = np.zeros((1, object_state_size))
# Put both lists in a dictionary.
datapoint['flat-state'] = flattened_state_sequence
datapoint['robot-state'] = robot_state_array
datapoint['object-state'] = object_state_array
datapoint['demo'] = concatenated_demonstration
datapoint['demonstrated_actions'] = concatenated_actions
# Add this dictionary to the file_demo_list.
task_demo_list.append(datapoint)
# Create array.
task_demo_array = np.array(task_demo_list)
# Now save this file_demo_list.
np.save(os.path.join(self.dataset_directory,self.task_list[task_index],"New_Task_Demo_Array.npy"),task_demo_array)
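# Illustrative sketch (with placeholder demo counts and hypothetical helper names) of two pieces
# of logic from Roboturk_Dataset above: (1) mapping a global dataset index to a
# (task_index, within-task index) pair via np.searchsorted, and (2) collapsing the two finger
# joints into a single gripper value normalized to [-1, 1].
def _example_index_to_task_and_demo(index, num_demos=np.array([3, 5, 2])):
    cummulative_num_demos = np.insert(num_demos.cumsum(), 0, 0)
    task_index = np.searchsorted(cummulative_num_demos, index, side='right') - 1
    new_index = index - cummulative_num_demos[max(task_index, 0)]
    return task_index, new_index
# e.g. index 0 -> (0, 0), index 3 -> (1, 0), index 7 -> (1, 4), index 9 -> (2, 1).
# (The hdf5-backed Roboturk_Dataset above then adds 1, because demos are 1-indexed on disk.)

def _example_normalize_gripper(gripper_finger_values):
    # gripper_finger_values: (T x 2) array of [left, right] finger joint positions.
    gripper_values = gripper_finger_values[:, 1] - gripper_finger_values[:, 0]
    gripper_values = (gripper_values - gripper_values.min())/(gripper_values.max() - gripper_values.min())
    return 2*gripper_values - 1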
class Roboturk_FullDataset(Roboturk_Dataset):
def __init__(self, args):
super(Roboturk_FullDataset, self).__init__(args)
self.environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlace","SawyerPickPlaceMilk","SawyerNutAssembly", "SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
def setup(self):
self.files = []
for i in range(len(self.task_list)):
if i==3 or i==5:
self.files.append(np.load("{0}/{1}/FullDataset_Task_Demo_Array.npy".format(self.dataset_directory, self.task_list[i]), allow_pickle=True))
else:
self.files.append(np.load("{0}/{1}/New_Task_Demo_Array.npy".format(self.dataset_directory, self.task_list[i]), allow_pickle=True))
def __getitem__(self, index):
if index>=self.total_length:
print("Out of bounds of dataset.")
return None
# Get bucket that index falls into based on num_demos array.
task_index = np.searchsorted(self.cummulative_num_demos, index, side='right')-1
# Decide task ID, and compute the demo index within that task by subtracting the cumulative
# number of demonstrations of preceding tasks.
new_index = index-self.cummulative_num_demos[max(task_index,0)]
data_element = self.files[task_index][new_index]
resample_length = len(data_element['demo'])//self.args.ds_freq
# print("Orig:", len(data_element['demo']),"New length:",resample_length)
self.kernel_bandwidth = self.args.smoothing_kernel_bandwidth
if resample_length<=1 or data_element['robot-state'].shape[0]<=1:
data_element['is_valid'] = False
else:
data_element['is_valid'] = True
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(data_element['demo'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['robot-state'] = gaussian_filter1d(data_element['robot-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['object-state'] = gaussian_filter1d(data_element['object-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['flat-state'] = gaussian_filter1d(data_element['flat-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['environment-name'] = self.environment_names[task_index]
if self.args.ds_freq>1:
data_element['demo'] = resample(data_element['demo'], resample_length)
data_element['robot-state'] = resample(data_element['robot-state'], resample_length)
data_element['object-state'] = resample(data_element['object-state'], resample_length)
data_element['flat-state'] = resample(data_element['flat-state'], resample_length)
return data_element
class Roboturk_SegmentedDataset(Roboturk_Dataset):
def __init__(self):
super(Roboturk_SegmentedDataset, self).__init__()
self.dataset_directory = '/checkpoint/tanmayshankar/Roboturk/RoboTurkPilot'
# Require a task list.
# The task name is needed for setting the environment, rendering.
# The environment itself is only needed for rendering, not for training.
self.task_list = ["bins-Bread", "bins-Can", "bins-Cereal", "bins-Milk", "pegs-RoundNut", "pegs-SquareNut"]
self.num_demos = np.array([1069, 1069, 1069, 1069, 1144, 1145])
self.cummulative_num_demos = self.num_demos.cumsum()
self.cummulative_num_demos = np.insert(self.cummulative_num_demos,0,0)
# Note on indexing: a 0 is inserted at the start of cummulative_num_demos, so that
# np.searchsorted(self.cummulative_num_demos, index, side='right')-1 maps a global dataset index
# to the task it belongs to, and subtracting the corresponding cumulative count recovers the
# demo index within that task. The side='right' option keeps boundary indices consistent.
self.total_length = self.num_demos.sum()
# Load data from all tasks.
self.files = []
for i in range(len(self.task_list)):
self.files.append(h5py.File("{0}/{1}/demo.hdf5".format(self.dataset_directory,self.task_list[i]),'r'))
# Seems to follow joint angles order:
# ('time','right_j0', 'head_pan', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6', 'r_gripper_l_finger_joint', 'r_gripper_r_finger_joint', 'Milk0', 'Bread0', 'Cereal0', 'Can0').
# Extract the arm joint angles and gripper values using the index lists below.
self.joint_angle_indices = [1,3,4,5,6,7,8]
self.gripper_indices = [9,10]
self.ds_freq = 20
# self.r_gripper_r_finger_joint = np.array([-0.0116, 0.020833])
# self.r_gripper_l_finger_joint = np.array([-0.020833, 0.0135])
# [l,r]
# gripper_open = [0.0115, -0.0115]
# gripper_closed = [-0.020833, 0.020833]
class Roboturk_NewSegmentedDataset(Dataset):
def __init__(self, args):
super(Roboturk_NewSegmentedDataset, self).__init__()
self.dataset_directory = '/checkpoint/tanmayshankar/Roboturk/RoboTurkPilot'
self.args = args
# Require a task list.
# The task name is needed for setting the environment, rendering.
# The environment itself is only needed for rendering, not for training.
self.task_list = ["bins-Bread", "bins-Can", "bins-Cereal", "bins-Milk", "pegs-RoundNut", "pegs-SquareNut"]
self.environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
self.num_demos = np.array([1069, 1069, 1069, 1069, 1144, 1145])
self.cummulative_num_demos = self.num_demos.cumsum()
self.cummulative_num_demos = np.insert(self.cummulative_num_demos,0,0)
# Note on indexing: a 0 is inserted at the start of cummulative_num_demos, so that
# np.searchsorted(self.cummulative_num_demos, index, side='right')-1 maps a global dataset index
# to the task it belongs to, and subtracting the corresponding cumulative count recovers the
# demo index within that task. The side='right' option keeps boundary indices consistent.
self.total_length = self.num_demos.sum()
# Load data from all tasks.
self.files = []
# for i in range(len(self.task_list)):
for i in range(len(self.task_list)):
self.files.append( np.load("{0}/{1}/New_Task_Demo_Array.npy".format(self.dataset_directory, self.task_list[i]), allow_pickle=True))
# # Seems to follow joint angles order:
# # ('time','right_j0', 'head_pan', 'right_j1', 'right_j2', 'right_j3', 'right_j4', 'right_j5', 'right_j6', 'r_gripper_l_finger_joint', 'r_gripper_r_finger_joint', 'Milk0', 'Bread0', 'Cereal0', 'Can0').
# # Extract these into...
# self.joint_angle_indices = [1,3,4,5,6,7,8]
# self.gripper_indices = [9,10]
# self.ds_freq = 20
# # self.r_gripper_r_finger_joint = np.array([-0.0116, 0.020833])
# # self.r_gripper_l_finger_joint = np.array([-0.020833, 0.0135])
# # [l,r]
# # gripper_open = [0.0115, -0.0115]
# # gripper_closed = [-0.020833, 0.020833]
def __len__(self):
return self.total_length
def __getitem__(self, index):
if index>=self.total_length:
print("Out of bounds of dataset.")
return None
# Get bucket that index falls into based on num_demos array.
task_index = np.searchsorted(self.cummulative_num_demos, index, side='right')-1
# Decide task ID, and compute the demo index within that task by subtracting the cumulative
# number of demonstrations of preceding tasks.
new_index = index-self.cummulative_num_demos[max(task_index,0)]
data_element = self.files[task_index][new_index]
resample_length = len(data_element['demo'])//self.args.ds_freq
# print("Orig:", len(data_element['demo']),"New length:",resample_length)
self.kernel_bandwidth = self.args.smoothing_kernel_bandwidth
if resample_length<=1 or index==4900 or index==537:
data_element['is_valid'] = False
else:
data_element['is_valid'] = True
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(data_element['demo'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['robot-state'] = gaussian_filter1d(data_element['robot-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['object-state'] = gaussian_filter1d(data_element['object-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['flat-state'] = gaussian_filter1d(data_element['flat-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['environment-name'] = self.environment_names[task_index]
data_element['task-id'] = task_index
if self.args.ds_freq>1:
data_element['demo'] = resample(data_element['demo'], resample_length)
data_element['robot-state'] = resample(data_element['robot-state'], resample_length)
data_element['object-state'] = resample(data_element['object-state'], resample_length)
data_element['flat-state'] = resample(data_element['flat-state'], resample_length)
return data_element
def get_number_task_demos(self, task_index):
return self.num_demos[task_index]
def get_task_demo(self, task_index, index):
if index>=self.num_demos[task_index]:
print("Out of bounds of dataset.")
return None
data_element = self.files[task_index][index]
resample_length = len(data_element['demo'])//self.args.ds_freq
# print("Orig:", len(data_element['demo']),"New length:",resample_length)
self.kernel_bandwidth = self.args.smoothing_kernel_bandwidth
if resample_length<=1 or data_element['robot-state'].shape[0]==0:
data_element['is_valid'] = False
else:
data_element['is_valid'] = True
if self.args.smoothen:
data_element['demo'] = gaussian_filter1d(data_element['demo'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['robot-state'] = gaussian_filter1d(data_element['robot-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['object-state'] = gaussian_filter1d(data_element['object-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['flat-state'] = gaussian_filter1d(data_element['flat-state'],self.kernel_bandwidth,axis=0,mode='nearest')
data_element['environment-name'] = self.environment_names[task_index]
if self.args.ds_freq>1:
data_element['demo'] = resample(data_element['demo'], resample_length)
data_element['robot-state'] = resample(data_element['robot-state'], resample_length)
data_element['object-state'] = resample(data_element['object-state'], resample_length)
data_element['flat-state'] = resample(data_element['flat-state'], resample_length)
return data_element
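# Minimal sketch of the smoothen-then-downsample preprocessing applied in __getitem__ and
# get_task_demo above. It assumes gaussian_filter1d is available via `from headers import *`,
# as it is elsewhere in this file; the bandwidth, ds_freq, and helper name are placeholders.
def _example_smooth_and_downsample(demo, smoothing_kernel_bandwidth=3.5, ds_freq=2):
    # demo: (T x D) trajectory. Smooth each dimension along the time axis, then keep
    # T//ds_freq evenly spaced timesteps using the resample helper defined at the top of this file.
    smoothed = gaussian_filter1d(demo, smoothing_kernel_bandwidth, axis=0, mode='nearest')
    return resample(smoothed, len(smoothed)//ds_freq)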
class Roboturk_Dataloader_Tester(unittest.TestCase):
def test_Roboturkdataloader(self):
self.dataset = Roboturk_Dataset()
# Check the first index of the dataset.
data_element = self.dataset[0]
validity = data_element['is_valid']
check_demo_data = (data_element['demo']==np.load("Test_Data/Roboturk_Dataloader_DE.npy")).all()
self.assertTrue(validity and check_demo_data)
if __name__ == '__main__':
# Run all tests defined for the dataloader.
unittest.main() | CausalSkillLearning-main | Experiments/Roboturk_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
import DataLoaders, MIME_DataLoader, Roboturk_DataLoader, Mocap_DataLoader
from PolicyManagers import *
import TestClass
def return_dataset(args, data=None):
# The data parameter overrides the data in args.data.
# This is so that we can call return_dataset with source and target data for transfer setting.
if data is not None:
args.data = data
# Define Data Loader.
if args.data=='Continuous':
dataset = DataLoaders.ContinuousToyDataset(args.datadir)
elif args.data=='ContinuousNonZero':
dataset = DataLoaders.ContinuousNonZeroToyDataset(args.datadir)
elif args.data=='DeterGoal':
dataset = DataLoaders.DeterministicGoalDirectedDataset(args.datadir)
elif args.data=='MIME':
dataset = MIME_DataLoader.MIME_NewDataset()
elif args.data=='Roboturk':
dataset = Roboturk_DataLoader.Roboturk_NewSegmentedDataset(args)
elif args.data=='OrigRoboturk':
dataset = Roboturk_DataLoader.Roboturk_Dataset(args)
elif args.data=='FullRoboturk':
dataset = Roboturk_DataLoader.Roboturk_FullDataset(args)
elif args.data=='Mocap':
dataset = Mocap_DataLoader.Mocap_Dataset(args)
return dataset
class Master():
def __init__(self, arguments):
self.args = arguments
self.dataset = return_dataset(self.args)
# Now define policy manager.
if self.args.setting=='learntsub':
self.policy_manager = PolicyManager_Joint(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='pretrain_sub':
self.policy_manager = PolicyManager_Pretrain(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='baselineRL':
self.policy_manager = PolicyManager_BaselineRL(args=self.args)
elif self.args.setting=='downstreamRL':
self.policy_manager = PolicyManager_DownstreamRL(args=self.args)
elif self.args.setting=='DMP':
self.policy_manager = PolicyManager_DMPBaselines(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='imitation':
self.policy_manager = PolicyManager_Imitation(self.args.number_policies, self.dataset, self.args)
elif self.args.setting=='transfer' or self.args.setting=='cycle_transfer':
source_dataset = return_dataset(self.args, data=self.args.source_domain)
target_dataset = return_dataset(self.args, data=self.args.target_domain)
if self.args.setting=='transfer':
self.policy_manager = PolicyManager_Transfer(args=self.args, source_dataset=source_dataset, target_dataset=target_dataset)
elif self.args.setting=='cycle_transfer':
self.policy_manager = PolicyManager_CycleConsistencyTransfer(args=self.args, source_dataset=source_dataset, target_dataset=target_dataset)
if self.args.debug:
embed()
# Create networks and training operations.
self.policy_manager.setup()
def run(self):
if self.args.setting=='pretrain_sub' or self.args.setting=='pretrain_prior' or \
self.args.setting=='imitation' or self.args.setting=='baselineRL' or self.args.setting=='downstreamRL' or \
self.args.setting=='transfer' or self.args.setting=='cycle_transfer':
if self.args.train:
if self.args.model:
self.policy_manager.train(self.args.model)
else:
self.policy_manager.train()
else:
if self.args.setting=='pretrain_prior':
self.policy_manager.train(self.args.model)
else:
self.policy_manager.evaluate(model=self.args.model)
elif self.args.setting=='learntsub':
if self.args.train:
if self.args.model:
self.policy_manager.train(self.args.model)
else:
if self.args.subpolicy_model:
print("Just loading subpolicies.")
self.policy_manager.load_all_models(self.args.subpolicy_model, just_subpolicy=True)
self.policy_manager.train()
else:
# self.policy_manager.train(self.args.model)
self.policy_manager.evaluate(self.args.model)
# elif self.args.setting=='baselineRL' or self.args.setting=='downstreamRL':
# if self.args.train:
# if self.args.model:
# self.policy_manager.train(self.args.model)
# else:
# self.policy_manager.train()
elif self.args.setting=='DMP':
self.policy_manager.evaluate_across_testset()
def test(self):
if self.args.test_code:
loader = TestClass.TestLoaderWithKwargs()
suite = loader.loadTestsFromTestCase(TestClass.MetaTestClass, policy_manager=self.policy_manager)
unittest.TextTestRunner().run(suite)
def parse_arguments():
parser = argparse.ArgumentParser(description='Learning Skills from Demonstrations')
# Setup training.
parser.add_argument('--datadir', dest='datadir',type=str,default='../Data/ContData/')
parser.add_argument('--train',dest='train',type=int,default=0)
parser.add_argument('--debug',dest='debug',type=int,default=0)
parser.add_argument('--notes',dest='notes',type=str)
parser.add_argument('--name',dest='name',type=str,default=None)
parser.add_argument('--fake_batch_size',dest='fake_batch_size',type=int,default=1)
parser.add_argument('--batch_size',dest='batch_size',type=int,default=1)
parser.add_argument('--training_phase_size',dest='training_phase_size',type=int,default=500000)
parser.add_argument('--initial_counter_value',dest='initial_counter_value',type=int,default=0)
parser.add_argument('--data',dest='data',type=str,default='Continuous')
parser.add_argument('--setting',dest='setting',type=str,default='gtsub')
parser.add_argument('--test_code',dest='test_code',type=int,default=0)
parser.add_argument('--model',dest='model',type=str)
parser.add_argument('--logdir',dest='logdir',type=str,default='Experiment_Logs/')
parser.add_argument('--epochs',dest='epochs',type=int,default=500) # Number of epochs to train for. Reduce for Mocap.
# Training setting.
parser.add_argument('--discrete_z',dest='discrete_z',type=int,default=0)
# parser.add_argument('--transformer',dest='transformer',type=int,default=0)
parser.add_argument('--z_dimensions',dest='z_dimensions',type=int,default=64)
parser.add_argument('--number_layers',dest='number_layers',type=int,default=5)
parser.add_argument('--hidden_size',dest='hidden_size',type=int,default=64)
parser.add_argument('--environment',dest='environment',type=str,default='SawyerLift') # Defines robosuite environment for RL.
# Data parameters.
parser.add_argument('--traj_segments',dest='traj_segments',type=int,default=1) # Defines whether to use trajectory segments for pretraining or entire trajectories. Useful for baseline implementation.
parser.add_argument('--gripper',dest='gripper',type=int,default=1) # Whether to use gripper training in roboturk.
parser.add_argument('--ds_freq',dest='ds_freq',type=int,default=1) # Additional downsample frequency.
parser.add_argument('--condition_size',dest='condition_size',type=int,default=4)
parser.add_argument('--smoothen', dest='smoothen',type=int,default=0) # Whether to smoothen the original dataset.
parser.add_argument('--smoothing_kernel_bandwidth', dest='smoothing_kernel_bandwidth',type=float,default=3.5) # The smoothing bandwidth that is applied to data loader trajectories.
parser.add_argument('--new_gradient',dest='new_gradient',type=int,default=1)
parser.add_argument('--b_prior',dest='b_prior',type=int,default=1)
parser.add_argument('--constrained_b_prior',dest='constrained_b_prior',type=int,default=1) # Whether to use constrained b prior var network or just normal b prior one.
parser.add_argument('--reparam',dest='reparam',type=int,default=1)
parser.add_argument('--number_policies',dest='number_policies',type=int,default=4)
parser.add_argument('--fix_subpolicy',dest='fix_subpolicy',type=int,default=1)
parser.add_argument('--train_only_policy',dest='train_only_policy',type=int,default=0) # Train only the policy network and use a pretrained encoder. This is weird but whatever.
parser.add_argument('--load_latent',dest='load_latent',type=int,default=1) # Whether to load latent policy from model or not.
parser.add_argument('--subpolicy_model',dest='subpolicy_model',type=str)
parser.add_argument('--traj_length',dest='traj_length',type=int,default=10)
parser.add_argument('--skill_length',dest='skill_length',type=int,default=5)
parser.add_argument('--var_skill_length',dest='var_skill_length',type=int,default=0)
parser.add_argument('--display_freq',dest='display_freq',type=int,default=10000)
parser.add_argument('--save_freq',dest='save_freq',type=int,default=1)
parser.add_argument('--eval_freq',dest='eval_freq',type=int,default=20)
parser.add_argument('--perplexity',dest='perplexity',type=float,default=30,help='Value of perplexity fed to TSNE.')
parser.add_argument('--entropy',dest='entropy',type=int,default=0)
parser.add_argument('--var_entropy',dest='var_entropy',type=int,default=0)
parser.add_argument('--ent_weight',dest='ent_weight',type=float,default=0.)
parser.add_argument('--var_ent_weight',dest='var_ent_weight',type=float,default=2.)
parser.add_argument('--pretrain_bias_sampling',type=float,default=0.) # Defines percentage of trajectory within which to sample trajectory segments for pretraining.
parser.add_argument('--pretrain_bias_sampling_prob',type=float,default=0.)
parser.add_argument('--action_scale_factor',type=float,default=1)
parser.add_argument('--z_exploration_bias',dest='z_exploration_bias',type=float,default=0.)
parser.add_argument('--b_exploration_bias',dest='b_exploration_bias',type=float,default=0.)
parser.add_argument('--lat_z_wt',dest='lat_z_wt',type=float,default=0.1)
parser.add_argument('--lat_b_wt',dest='lat_b_wt',type=float,default=1.)
parser.add_argument('--z_probability_factor',dest='z_probability_factor',type=float,default=0.1)
parser.add_argument('--b_probability_factor',dest='b_probability_factor',type=float,default=0.1)
parser.add_argument('--subpolicy_clamp_value',dest='subpolicy_clamp_value',type=float,default=-5)
parser.add_argument('--latent_clamp_value',dest='latent_clamp_value',type=float,default=-5)
parser.add_argument('--min_variance_bias',dest='min_variance_bias',type=float,default=0.01)
parser.add_argument('--normalization',dest='normalization',type=str,default='None')
parser.add_argument('--likelihood_penalty',dest='likelihood_penalty',type=int,default=10)
parser.add_argument('--subpolicy_ratio',dest='subpolicy_ratio',type=float,default=0.01)
parser.add_argument('--latentpolicy_ratio',dest='latentpolicy_ratio',type=float,default=0.1)
parser.add_argument('--temporal_latentpolicy_ratio',dest='temporal_latentpolicy_ratio',type=float,default=0.)
parser.add_argument('--latent_loss_weight',dest='latent_loss_weight',type=float,default=0.1)
parser.add_argument('--kl_weight',dest='kl_weight',type=float,default=0.01)
parser.add_argument('--var_loss_weight',dest='var_loss_weight',type=float,default=1.)
parser.add_argument('--prior_weight',dest='prior_weight',type=float,default=0.00001)
# Cross Domain Skill Transfer parameters.
parser.add_argument('--discriminability_weight',dest='discriminability_weight',type=float,default=1.,help='Weight of discriminability loss in cross domain skill transfer.')
parser.add_argument('--vae_loss_weight',dest='vae_loss_weight',type=float,default=1.,help='Weight of VAE loss in cross domain skill transfer.')
parser.add_argument('--alternating_phase_size',dest='alternating_phase_size',type=int,default=2000, help='Size of alternating training phases.')
parser.add_argument('--discriminator_phase_size',dest='discriminator_phase_size',type=int,default=2,help='Factor by which to train discriminator more than generator.')
parser.add_argument('--cycle_reconstruction_loss_weight',dest='cycle_reconstruction_loss_weight',type=float,default=1.,help='Weight of the cycle-consistency reconstruction loss term.')
# Exploration and learning rate parameters.
parser.add_argument('--epsilon_from',dest='epsilon_from',type=float,default=0.3)
parser.add_argument('--epsilon_to',dest='epsilon_to',type=float,default=0.05)
parser.add_argument('--epsilon_over',dest='epsilon_over',type=int,default=30)
parser.add_argument('--learning_rate',dest='learning_rate',type=float,default=1e-4)
# Baseline parameters.
parser.add_argument('--baseline_kernels',dest='baseline_kernels',type=int,default=15)
parser.add_argument('--baseline_window',dest='baseline_window',type=int,default=15)
parser.add_argument('--baseline_kernel_bandwidth',dest='baseline_kernel_bandwidth',type=float,default=3.5)
# Reinforcement Learning parameters.
parser.add_argument('--TD',dest='TD',type=int,default=0) # Whether or not to use Temporal difference while training the critic network.
parser.add_argument('--OU',dest='OU',type=int,default=1) # Whether or not to use the Ornstein Uhlenbeck noise process while training.
parser.add_argument('--OU_max_sigma',dest='OU_max_sigma',type=float,default=0.2) # Max Sigma value of the Ornstein Uhlenbeck noise process.
parser.add_argument('--OU_min_sigma',dest='OU_min_sigma',type=float,default=0.2) # Min Sigma value of the Ornstein Uhlenbeck noise process.
parser.add_argument('--MLP_policy',dest='MLP_policy',type=int,default=0) # Whether or not to use MLP policy.
parser.add_argument('--mean_nonlinearity',dest='mean_nonlinearity',type=int,default=0) # Whether or not to use Tanh activation.
parser.add_argument('--burn_in_eps',dest='burn_in_eps',type=int,default=500) # How many episodes to burn in.
parser.add_argument('--random_memory_burn_in',dest='random_memory_burn_in',type=int,default=1) # Whether to burn in episodes into memory randomly or not.
parser.add_argument('--shaped_reward',dest='shaped_reward',type=int,default=0) # Whether or not to use shaped rewards.
parser.add_argument('--memory_size',dest='memory_size',type=int,default=2000) # Size of replay memory. 2000 is okay, but is still kind of short sighted.
# Transfer learning domains, etc.
parser.add_argument('--source_domain',dest='source_domain',type=str,help='What the source domain is in transfer.')
parser.add_argument('--target_domain',dest='target_domain',type=str,help='What the target domain is in transfer.')
return parser.parse_args()
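# Example (hypothetical) invocations using only flags defined in parse_arguments() above; the run
# names, data choices, and model path are placeholders, not commands from the original repository:
#   python Master.py --train=1 --setting=pretrain_sub --data=MIME --name=MIME_Pretrain_Example --epochs=500
#   python Master.py --train=0 --setting=learntsub --data=Roboturk --model=<path_to_saved_model> --name=Roboturk_Eval_Example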
def main(args):
args = parse_arguments()
master = Master(args)
if args.test_code:
master.test()
else:
master.run()
if __name__=='__main__':
main(sys.argv)
| CausalSkillLearning-main | Experiments/Master.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from headers import *
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class Mocap_Dataset(Dataset):
def __init__(self, args, split='all'):
self.dataset_directory = '/checkpoint/tanmayshankar/Mocap/'
self.args = args
# Load the entire set of trajectories.
self.data_list = np.load(os.path.join(self.dataset_directory, "Demo_Array.npy"),allow_pickle=True)
self.dataset_length = len(self.data_list)
self.ds_freq = self.args.ds_freq
def __len__(self):
# Return length of file list.
return self.dataset_length
def process_item(self, item):
resample_length = len(item['global_positions']) // self.ds_freq
if resample_length<5:
item['is_valid'] = False
else:
item['is_valid'] = True
item['global_positions'] = resample(item['global_positions'], resample_length)
demo = resample(item['local_positions'], resample_length)
item['local_positions'] = demo
item['local_rotations'] = resample(item['local_rotations'], resample_length)
item['animation'] = resample(item['animation'], resample_length)
# Reshape to (T x Number_of_dimensions) and store as 'demo' for downstream dataloading.
item['demo'] = demo.reshape((demo.shape[0],-1))
return item
def __getitem__(self, index):
# Return n'th item of dataset.
# This has already processed everything.
# Remember, the global and local positions are stored as (Number_Frames x Number_Joints x 3) arrays.
# process_item additionally flattens the resampled local positions into a (Number_Frames x Number_Dimensions) 'demo' array for downstream dataloading.
return self.process_item(copy.deepcopy(self.data_list[index]))
def compute_statistics(self):
embed() | CausalSkillLearning-main | Experiments/Mocap_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
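# For reference, the DMP class below implements a standard discrete dynamic movement primitive.
# Reading the equations off update_target_force_itau() and calc_rollout_acceleration() further down,
# the transformation system (per dimension, in this file's notation) is
#     tau^2 * \ddot{x} = alphaz * ( betaz * (g - x) - tau * \dot{x} ) + f(s),
# so the target forcing term fit by learn_weights() from a demonstration is
#     f_target = tau^2 * \ddot{x}_demo - alphaz * ( betaz * (g - x_demo) - tau * \dot{x}_demo ),
# with the canonical phase s(t) = exp(-alpha * t / tau) (calc_phase) and f(s) represented as a
# normalized weighted sum of Gaussian basis functions scaled by s * (g - x_0) (calc_rollout_force).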
class DMP():
# def __init__(self, time_steps=100, num_ker=25, dimensions=3, kernel_bandwidth=None, alphaz=None, time_basis=False):
def __init__(self, time_steps=40, num_ker=15, dimensions=7, kernel_bandwidth=3.5, alphaz=5., time_basis=True):
# DMP(dimensions=7,time_steps=40,num_ker=15,kernel_bandwidth=3.5,alphaz=5.,time_basis=True)
# self.alphaz = 25.0
if alphaz is not None:
self.alphaz = alphaz
else:
self.alphaz = 10.
self.betaz = self.alphaz/4
self.alpha = self.alphaz/3
self.time_steps = time_steps
self.tau = self.time_steps
# self.tau = 1.
self.use_time_basis = time_basis
self.dimensions = dimensions
# self.number_kernels = max(500,self.time_steps)
self.number_kernels = num_ker
if kernel_bandwidth is not None:
self.kernel_bandwidth = kernel_bandwidth
else:
self.kernel_bandwidth = self.calculate_good_sigma(self.time_steps, self.number_kernels)
self.epsilon = 0.001
self.setup()
def setup(self):
self.gaussian_kernels = np.zeros((self.number_kernels,2))
self.weights = np.zeros((self.number_kernels, self.dimensions))
self.demo_pos = np.zeros((self.time_steps, self.dimensions))
self.demo_vel = np.zeros((self.time_steps, self.dimensions))
self.demo_acc = np.zeros((self.time_steps, self.dimensions))
self.target_forces = np.zeros((self.time_steps, self.dimensions))
self.phi = np.zeros((self.number_kernels, self.time_steps, self.time_steps))
self.eta = np.zeros((self.time_steps, self.dimensions))
self.vector_phase = np.zeros(self.time_steps)
# Defining Rollout variables.
self.rollout_time = self.time_steps
self.dt = 1./self.rollout_time
self.pos_roll = np.zeros((self.rollout_time,self.dimensions))
self.vel_roll = np.zeros((self.rollout_time,self.dimensions))
self.acc_roll = np.zeros((self.rollout_time,self.dimensions))
self.force_roll = np.zeros((self.rollout_time,self.dimensions))
self.goal = np.zeros(self.dimensions)
self.start = np.zeros(self.dimensions)
def calculate_good_sigma(self, time, number_kernels, threshold=0.15):
return time/(2*(number_kernels-1)*(np.sqrt(-np.log(threshold))))
def load_trajectory(self,pos,vel=None,acc=None):
self.demo_pos = np.zeros((self.time_steps, self.dimensions))
self.demo_vel = np.zeros((self.time_steps, self.dimensions))
self.demo_acc = np.zeros((self.time_steps, self.dimensions))
if vel is not None and acc is not None:
self.demo_pos = copy.deepcopy(pos)
self.demo_vel = copy.deepcopy(vel)
self.demo_acc = copy.deepcopy(acc)
else:
self.smooth_interpolate(pos)
def smooth_interpolate(self, pos):
# Filter the position input by Gaussian smoothing.
smooth_pos = gaussian_filter1d(pos,3.5,axis=0,mode='nearest')
time_range = np.linspace(0, pos.shape[0]-1, pos.shape[0])
new_time_range = np.linspace(0,pos.shape[0]-1,self.time_steps+2)
self.interpolated_pos = np.zeros((self.time_steps+2,self.dimensions))
interpolating_objects = []
for i in range(self.dimensions):
interpolating_objects.append(interp1d(time_range,pos[:,i],kind='linear'))
self.interpolated_pos[:,i] = interpolating_objects[i](new_time_range)
self.demo_vel = np.diff(self.interpolated_pos,axis=0)[:self.time_steps]
self.demo_acc = np.diff(self.interpolated_pos,axis=0,n=2)[:self.time_steps]
self.demo_pos = self.interpolated_pos[:self.time_steps]
def initialize_variables(self):
self.weights = np.zeros((self.number_kernels, self.dimensions))
self.target_forces = np.zeros((self.time_steps, self.dimensions))
self.phi = np.zeros((self.number_kernels, self.time_steps, self.time_steps))
self.eta = np.zeros((self.time_steps, self.dimensions))
self.kernel_centers = np.linspace(0,self.time_steps,self.number_kernels)
self.vector_phase = self.calc_vector_phase(self.kernel_centers)
self.gaussian_kernels[:,0] = self.vector_phase
# Different kernel parameters that have worked before, giving different behavior.
# # dummy = (np.diff(self.gaussian_kernels[:,0]*0.55))**2
# # dummy = (np.diff(self.gaussian_kernels[:,0]*2))**2
# # dummy = (np.diff(self.gaussian_kernels[:,0]))**2
dummy = (np.diff(self.gaussian_kernels[:,0]*self.kernel_bandwidth))**2
self.gaussian_kernels[:,1] = 1. / np.append(dummy,dummy[-1])
# self.gaussian_kernels[:,1] = self.number_kernels/self.gaussian_kernels[:,0]
def calc_phase(self,time):
return np.exp(-self.alpha*float(time)/self.tau)
def calc_vector_phase(self,time):
return np.exp(-self.alpha*time.astype(float)/self.tau)
def basis(self,index,time):
return np.exp(-(self.gaussian_kernels[index,1])*((self.calc_phase(time)-self.gaussian_kernels[index,0])**2))
def time_basis(self, index, time):
# return np.exp(-(self.gaussian_kernels[index,1])*((time-self.kernel_centers[index])**2))
# return np.exp(-(time-self.kernel_centers[index])**2)
return np.exp(-((time-self.kernel_centers[index])**2)/(self.kernel_bandwidth))
def vector_basis(self, index, time_range):
return np.exp(-(self.gaussian_kernels[index,1])*((self.calc_vector_phase(time_range)-self.gaussian_kernels[index,0])**2))
def update_target_force_itau(self):
self.target_forces = (self.tau**2)*self.demo_acc - self.alphaz*(self.betaz*(self.demo_pos[self.time_steps-1]-self.demo_pos)-self.tau*self.demo_vel)
def update_target_force_dtau(self):
self.target_forces = self.demo_acc/(self.tau**2) - self.alphaz*(self.betaz*(self.demo_pos[self.time_steps-1]-self.demo_pos)-self.demo_vel/self.tau)
def update_target_force(self):
self.target_forces = self.demo_acc - self.alphaz*(self.betaz*(self.demo_pos[self.time_steps-1]-self.demo_pos)-self.demo_vel)
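# The three update_target_force_* variants above rearrange the standard DMP
# transformation system, tau^2 * x_ddot = alphaz*(betaz*(goal - x) - tau*x_dot) + f,
# to solve for the forcing term f along the demonstration (with goal taken as the
# final demo position); they differ only in how tau enters (multiplying in "i",
# dividing in "d", absent in "n").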
def update_phi(self):
for i in range(self.number_kernels):
for t in range(self.time_steps):
if self.use_time_basis:
self.phi[i,t,t] = self.time_basis(i,t)
else:
self.phi[i,t,t] = self.basis(i,t)
def update_eta(self):
t_range = np.linspace(0,self.time_steps,self.time_steps)
vector_phase = self.calc_vector_phase(t_range)
for k in range(self.dimensions):
self.eta[:,k] = vector_phase*(self.demo_pos[self.time_steps-1,k]-self.demo_pos[0,k])
def learn_DMP(self, pos, forces="i"):
self.setup()
self.load_trajectory(pos)
self.initialize_variables()
self.learn_weights(forces=forces)
def learn_weights(self, forces="i"):
if forces=="i":
self.update_target_force_itau()
elif forces=="d":
self.update_target_force_dtau()
elif forces=="n":
self.update_target_force()
self.update_phi()
self.update_eta()
for j in range(self.dimensions):
for i in range(self.number_kernels):
self.weights[i,j] = np.dot(self.eta[:,j],np.dot(self.phi[i],self.target_forces[:,j]))
self.weights[i,j] /= np.dot(self.eta[:,j],np.dot(self.phi[i],self.eta[:,j])) + self.epsilon
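# Each weight is fit independently per kernel and per dimension with the usual
# locally weighted regression for DMP forcing terms:
# w[i,j] = (eta_j^T Phi_i f_j) / (eta_j^T Phi_i eta_j + epsilon),
# where Phi_i is the diagonal matrix of basis activations over time and eta is
# phase * (goal - start) of the demo; epsilon guards against division by zero.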
def initialize_rollout(self,start,goal,init_vel):
self.pos_roll = np.zeros((self.rollout_time,self.dimensions))
self.vel_roll = np.zeros((self.rollout_time,self.dimensions))
self.acc_roll = np.zeros((self.rollout_time,self.dimensions))
self.tau = self.rollout_time
self.pos_roll[0] = copy.deepcopy(start)
self.vel_roll[0] = copy.deepcopy(init_vel)
self.goal = goal
self.start = start
self.dt = self.tau/self.rollout_time
# print(self.dt,self.tau,self.rollout_time)
def calc_rollout_force(self, roll_time):
den = 0
time = copy.deepcopy(roll_time)
for i in range(self.number_kernels):
if self.use_time_basis:
self.force_roll[roll_time] += self.time_basis(i,time)*self.weights[i]
den += self.time_basis(i,time)
else:
self.force_roll[roll_time] += self.basis(i,time)*self.weights[i]
den += self.basis(i,time)
self.force_roll[roll_time] *= (self.goal-self.start)*self.calc_phase(time)/den
def calc_rollout_acceleration(self,time):
self.acc_roll[time] = (1./self.tau**2)*(self.alphaz * (self.betaz * (self.goal - self.pos_roll[time]) - self.tau*self.vel_roll[time]) + self.force_roll[time])
def calc_rollout_vel(self,time):
self.vel_roll[time] = self.vel_roll[time-1] + self.acc_roll[time-1]*self.dt
def calc_rollout_pos(self,time):
self.pos_roll[time] = self.pos_roll[time-1] + self.vel_roll[time-1]*self.dt
def rollout(self,start,goal,init_vel):
self.initialize_rollout(start,goal,init_vel)
self.calc_rollout_force(0)
self.calc_rollout_acceleration(0)
for i in range(1,self.rollout_time):
self.calc_rollout_force(i)
self.calc_rollout_vel(i)
self.calc_rollout_pos(i)
self.calc_rollout_acceleration(i)
return self.pos_roll
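# The rollout integrates the system with explicit Euler steps: velocity and
# position are advanced using the previous step's acceleration and velocity,
# after which the forcing term (the normalized, phase- and (goal-start)-scaled
# combination of kernel activations and learned weights) and the acceleration
# for the current step are recomputed.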
def load_weights(self, weight):
self.weights = copy.deepcopy(weight)
def main(args):
pos = np.load(str(sys.argv[1]))[:,:3]
vel = np.load(str(sys.argv[2]))[:,:3]
acc = np.load(str(sys.argv[3]))[:,:3]
rolltime = 500
dmp = DMP(rolltime)
dmp.load_trajectory(pos)
dmp.initialize_variables()
dmp.learn_DMP(pos)  # learn_DMP needs the demonstration; it re-runs load_trajectory / initialize_variables internally.
start = np.zeros(dmp.dimensions)
goal = np.ones(dmp.dimensions)
norm_vector = pos[-1]-pos[0]
init_vel = np.divide(vel[0],norm_vector)
dmp.rollout(start, goal, init_vel)
dmp.save_rollout()
| CausalSkillLearning-main | Experiments/DMP.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import glob, os, sys, argparse
import torch, copy
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from IPython import embed
import matplotlib
matplotlib.use('Agg')
# matplotlib.rcParams['animation.ffmpeg_args'] = '-report'
matplotlib.rcParams['animation.bitrate'] = 2000
import matplotlib.pyplot as plt
import tensorboardX
from scipy import stats
from absl import flags
from memory_profiler import profile
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from IPython import embed
import pdb
import sklearn.manifold as skl_manifold
from sklearn.decomposition import PCA
from matplotlib.offsetbox import (TextArea, DrawingArea, OffsetImage,
AnnotationBbox)
from matplotlib.animation import FuncAnimation
import tensorflow as tf
import tempfile
import moviepy.editor as mpy
import subprocess
import h5py
import time
import robosuite
import unittest
import cProfile
from scipy import stats, signal
from scipy.interpolate import interp1d
from scipy.ndimage.filters import gaussian_filter1d
from scipy.signal import find_peaks, argrelextrema
from sklearn.neighbors import NearestNeighbors
| CausalSkillLearning-main | Experiments/headers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np, glob, os
from IPython import embed
# Env list.
environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
# Evaluate baselineRL methods.
a = 86
b = 86
a = 130
b = 137
prefix = 'RL'
increment = 100
reward_list = []
for i in range(a,b+1):
model_template = "RL{0}/saved_models/Model_epoch*".format(i)
models = glob.glob(model_template)
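# Note: glob.glob returns files in arbitrary order, so models[-1] below is not
# guaranteed to be the highest-numbered checkpoint.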
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
max_model = int(models[-1].lstrip("RL{0}/saved_models/Model_epoch".format(i)))
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)):
rewards[j] = np.load("RL{0}/MEval/m{1}/Mean_Reward_RL{0}.npy".format(i,model_range[j]))
reward_list.append(rewards)
embed()
# x = np.arange(0,260,20)
# dists = np.zeros((6,len(x),100))
# a = 6
# b = 12
# for i in range(a,b):
# for j in range(len(x)):
# dists[i-a,j] = np.load("IL0{0}/MEval/m{1}/Total_Rewards_IL0{0}.npy".format(str(i).zfill(2),x[j]))
# IL
a = 18
b = 23
prefix = 'IL0'
increment = 20
reward_list = []
for i in range(a,b+1):
model_template = "{0}{1}/saved_models/Model_epoch*".format(prefix,i)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
max_model = int(models[-1].lstrip("{0}{1}/saved_models/Model_epoch".format(prefix,i)))
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(i,model_range[j],prefix))
reward_list.append(rewards)
# Get distances
a = 30
b = 37
prefix = 'RJ'
increment = 20
distance_list = []
for i in range(a,b+1):
model_template = "{0}{1}/saved_models/Model_epoch*".format(prefix,i)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
max_model = int(models[-1].lstrip("{0}{1}/saved_models/Model_epoch".format(prefix,i)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
distances = np.zeros((len(model_range)))
for j in range(len(model_range)):
distances[j] = np.load("{2}{0}/MEval/m{1}/Mean_Trajectory_Distance_{2}{0}.npy".format(i,model_range[j],prefix))
distance_list.append(distances)
################################################
# Env list.
environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
# Evaluate baselineRL methods.
a = 5
b = 12
prefix = 'downRL'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(3)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
max_model = int(models[-1].lstrip("{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
##############################################
# MOcap distances
# Get distances
a = 1
b = 2
prefix = 'Mocap00'
increment = 20
distance_list = []
for i in range(a,b+1):
model_template = "{0}{1}/saved_models/Model_epoch*".format(prefix,i)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
max_model = int(models[-1].lstrip("{0}{1}/saved_models/Model_epoch".format(prefix,i)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
distances = np.zeros((len(model_range)))
for j in range(len(model_range)):
distances[j] = np.load("{2}{0}/MEval/m{1}/Mean_Trajectory_Distance_{2}{0}.npy".format(i,model_range[j],prefix))
distance_list.append(distances)
##############################################
################################################
# Env list.
environment_names = ["SawyerPickPlaceBread","SawyerPickPlaceCan","SawyerPickPlaceCereal","SawyerPickPlaceMilk","SawyerNutAssemblyRound","SawyerNutAssemblySquare"]
def remove_start(inputstring, word_to_remove):
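# str.lstrip would treat its argument as a set of characters to strip (and so
# could also eat leading digits of the epoch number); this removes the prefix
# exactly once instead.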
return inputstring[len(word_to_remove):] if inputstring.startswith(word_to_remove) else inputstring
# Evaluate baselineRL methods.
a = 23
b = 28
prefix = 'downRL_pi'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(3)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
# max_model = int(models[-1].lstrip("{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = int(remove_start(models[-1],"{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)-1):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
for i in range(a,b+1):
print("For environment: ", environment_names[i-a])
print("Average reward:", np.array(reward_list[i-a]).max())
def evalrl(a,b):
prefix = 'downRL_pi'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(3)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
# max_model = int(models[-1].lstrip("{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = int(remove_start(models[-1],"{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)-1):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
for i in range(a,b+1):
print("For environment: ", environment_names[i-a])
print("Average reward:", np.array(reward_list[i-a]).max())
def evalrl(a,b):
prefix = 'RL'
increment = 20
reward_list = []
for i in range(a,b+1):
padded_index = str(i).zfill(2)
model_template = "{1}{0}/saved_models/Model_epoch*".format(padded_index,prefix)
models = glob.glob(model_template)
# number_models = [int((model.lstrip("RL{0}/saved_models/Model_epoch".format(i))).zfill(4)) for model in models]
# max_model = int(models[-1].lstrip("{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = int(remove_start(models[-1],"{1}{0}/saved_models/Model_epoch".format(padded_index,prefix)))
max_model = max_model-max_model%increment
model_range = np.arange(0,max_model+increment,increment)
rewards = np.zeros((len(model_range)))
for j in range(len(model_range)-1):
rewards[j] = np.load("{2}{0}/MEval/m{1}/Mean_Reward_{2}{0}.npy".format(padded_index,model_range[j],prefix))
# rewards[j] = np.load("{0}{1}/MEval/m{2}/Mean_Reward_{0}{1}.npy".format(prefix,padded_indexi,model_range[j],prefix))
reward_list.append(rewards)
for i in range(a,b+1):
print("For environment: ", environment_names[i-a])
print("Average reward:", np.array(reward_list[i-a]).max()) | CausalSkillLearning-main | Experiments/Eval_RLRewards.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from mocap_processing.motion.pfnn import Animation, BVH
from basecode.render import glut_viewer as viewer
from basecode.render import gl_render, camera
from basecode.utils import basics
from basecode.math import mmMath
import numpy as np, imageio
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import os, time, threading
from IPython import embed
global whether_to_render
whether_to_render = False
def init():
global whether_to_render, global_positions, counter, joint_parents, done_with_render, save_path, name_prefix, image_list
whether_to_render = False
done_with_render = False
global_positions = None
joint_parents = None
save_path = "/private/home/tanmayshankar/Research/Code/"
name_prefix = "Viz_Image"
image_list = []
counter = 0
# Define function to load animation file.
def load_animation(bvh_filename):
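# Returns: global_positions with shape (n_frames, n_joints, 3), joint_parents
# where entry i is the parent joint index (-1 for the root), and the capture's
# seconds-per-frame.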
animation, joint_names, time_per_frame = BVH.load(bvh_filename)
joint_parents = animation.parents
global_positions = Animation.positions_global(animation)
return global_positions, joint_parents, time_per_frame
# Function that draws body of animated character from the global positions.
def render_pose_by_capsule(global_positions, frame_num, joint_parents, scale=1.0, color=[0.5, 0.5, 0.5, 1], radius=0.05):
glPushMatrix()
glScalef(scale, scale, scale)
for i in range(len(joint_parents)):
pos = global_positions[frame_num][i]
# gl_render.render_point(pos, radius=radius, color=color)
j = joint_parents[i]
if j!=-1:
pos_parent = global_positions[frame_num][j]
p = 0.5 * (pos_parent + pos)
l = np.linalg.norm(pos_parent-pos)
R = mmMath.getSO3FromVectors(np.array([0, 0, 1]), pos_parent-pos)
gl_render.render_capsule(mmMath.Rp2T(R,p), l, radius, color=color, slice=16)
glPopMatrix()
# Callback that renders one pose.
def render_callback_time_independent():
global global_positions, joint_parents, counter
if counter<global_positions.shape[0]:
gl_render.render_ground(size=[100, 100], color=[0.8, 0.8, 0.8], axis='z', origin=True, use_arrow=True)
# Render Shadow of Character
glEnable(GL_DEPTH_TEST)
glDisable(GL_LIGHTING)
glPushMatrix()
glTranslatef(0, 0, 0.001)
glScalef(1, 1, 0)
render_pose_by_capsule(global_positions, counter, joint_parents, color=[0.5,0.5,0.5,1.0])
glPopMatrix()
# Render Character
glEnable(GL_LIGHTING)
render_pose_by_capsule(global_positions, counter, joint_parents, color=np.array([85, 160, 173, 255])/255.0)
# Callback that runs rendering when the global variable is set to true.
def idle_callback():
# # Increment counter
# # Set frame number of trajectory to be rendered
# # Using the time independent rendering.
# # Call drawGL and savescreen.
# # Since this is an idle callback, drawGL won't call itself (only calls render callback).
global whether_to_render, counter, global_positions, done_with_render, save_path, name_prefix
done_with_render = False
# if whether_to_render and counter<global_positions.shape[0]:
if whether_to_render and counter<10:
# print("Whether to render is actually true, with counter:",counter)
# render_callback_time_independent()
viewer.drawGL()
viewer.save_screen(save_path, "Image_{}_{}".format(name_prefix, counter))
# viewer.save_screen("/home/tanmayshankar/Research/Code/","Visualize_Image_{}".format(counter))
counter += 1
# Set whether to render to false if counter exceeded.
# if counter>=global_positions.shape[0]:
if counter>=10:
whether_to_render = False
done_with_render = True
# If whether to render is false, reset the counter.
else:
counter = 0
def idle_callback_return():
# # Increment counter
# # Set frame number of trajectory to be rendered
# # Using the time independent rendering.
# # Call drawGL and savescreen.
# # Since this is an idle callback, drawGL won't call itself (only calls render callback).
global whether_to_render, counter, global_positions, done_with_render, save_path, name_prefix, image_list
done_with_render = False
if whether_to_render and counter<global_positions.shape[0]:
# if whether_to_render and counter<10:
# print("Whether to render is actually true, with counter:",counter)
# render_callback_time_independent()
viewer.drawGL()
name = "Image_{}_{}".format(name_prefix, counter)
viewer.save_screen(save_path, name)
img = imageio.imread(os.path.join(save_path, name+".png"))
image_list.append(img)
counter += 1
# Set whether to render to false if counter exceeded.
if counter>=global_positions.shape[0]:
# if counter>=10:
whether_to_render = False
done_with_render = True
# If whether to render is false, reset the counter.
else:
counter = 0 | CausalSkillLearning-main | Experiments/MocapVisualizationUtils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import MocapVisualizationUtils
import threading, time, numpy as np
# bvh_filename = "/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments/01_01_poses.bvh"
bvh_filename = "/private/home/tanmayshankar/Research/Code/CausalSkillLearning/Experiments/01_01_poses.bvh"
filenames = [bvh_filename]
file_num = 0
print("About to run viewer.")
cam_cur = MocapVisualizationUtils.camera.Camera(pos=np.array([6.0, 0.0, 2.0]),
origin=np.array([0.0, 0.0, 0.0]),
vup=np.array([0.0, 0.0, 1.0]),
fov=45.0)
def run_thread():
MocapVisualizationUtils.viewer.run(
title='BVH viewer',
cam=cam_cur,
size=(1280, 720),
keyboard_callback=None,
render_callback=MocapVisualizationUtils.render_callback_time_independent,
idle_callback=MocapVisualizationUtils.idle_callback,
)
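# Note: run_thread is redefined immediately below; the second definition (which
# uses idle_callback_return and therefore accumulates frames into image_list)
# shadows this one and is the version the thread actually runs.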
def run_thread():
MocapVisualizationUtils.viewer.run(
title='BVH viewer',
cam=cam_cur,
size=(1280, 720),
keyboard_callback=None,
render_callback=MocapVisualizationUtils.render_callback_time_independent,
idle_callback=MocapVisualizationUtils.idle_callback_return,
)
# Run init before loading animation.
MocapVisualizationUtils.init()
MocapVisualizationUtils.global_positions, MocapVisualizationUtils.joint_parents, MocapVisualizationUtils.time_per_frame = MocapVisualizationUtils.load_animation(filenames[file_num])
thread = threading.Thread(target=run_thread)
thread.start()
print("Going to actually call callback now.")
MocapVisualizationUtils.whether_to_render = True
x_count = 0
while MocapVisualizationUtils.done_with_render==False and MocapVisualizationUtils.whether_to_render==True:
x_count += 1
time.sleep(1)
print("x_count is now: ",x_count)
print("We finished with the visualization!")
| CausalSkillLearning-main | Experiments/MocapVisualizationExample.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Debugging cycle consistency transfer.
python Master.py --name=CTdebug --train=1 --setting=cycle_transfer --source_domain=ContinuousNonZero --target_domain=ContinuousNonZero --z_dimensions=64 --number_layers=5 --hidden_size=64 --data=ContinuousNonZero --training_phase_size=10000 --display_freq=1000 --eval_freq=4 --alternating_phase_size=200 --discriminator_phase_size=2 --vae_loss_weight=1. --discriminability_weight=2.0 --kl_weight=0.001
| CausalSkillLearning-main | Experiments/Code_Runs/CycleTransfer_Runs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class GridWorldDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
def __len__(self):
# Find out how many images we've stored.
filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
# FOR NOW: USE ONLY till 3200 images.
return 3200
# return len(filelist)
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = cv2.imread(os.path.join(self.dataset_directory,"Image{0}.png".format(index)))
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Image{0}_Traj1.npy".format(index))).astype(float)
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence | CausalSkillLearning-main | DataLoaders/GridWorld_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
import pdb
import scipy.misc
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
flags.DEFINE_string('MIME_imgs_dir', '/checkpoint/shubhtuls/data/MIME/', 'Data Directory')
flags.DEFINE_integer('img_h', 64, 'Height')
flags.DEFINE_integer('img_w', 128, 'Width')
flags.DEFINE_integer('ds_freq', 20, 'Downsample joint trajectories by this factor. Original recording rate = 100Hz')
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class MIME_Img_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts, split='all'):
self.dataset_directory = opts.MIME_dir
self.imgs_dataset_directory = opts.MIME_imgs_dir
self.img_h = opts.img_h
self.img_w = opts.img_w
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
self.filelist = glob.glob(self.fulltext)
self.ds_freq = opts.ds_freq
with open(self.filelist[0], 'r') as file:
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
if split == 'all':
self.filelist = self.filelist
else:
self.task_lists = np.load(os.path.join(
self.dataset_directory, 'MIME_jointangles/{}_Lists.npy'.format(split.capitalize())))
self.filelist = []
for i in range(20):
self.filelist.extend(self.task_lists[i])
self.filelist = [f.replace('/checkpoint/tanmayshankar/MIME/', opts.MIME_dir) for f in self.filelist]
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
file = self.filelist[index]
file_split = file.split('/')
frames_folder = osp.join(self.imgs_dataset_directory, file_split[-3], file_split[-2], 'frames')
n_frames = len(os.listdir(frames_folder))
imgs = []
frame_inds = [0, n_frames//2, n_frames-1]
for fi in frame_inds:
img = scipy.misc.imread(osp.join(frames_folder, 'im_{}.png'.format(fi+1)))
imgs.append(scipy.misc.imresize(img, (self.img_h, self.img_w)))
imgs = np.stack(imgs)
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
joint_angle_trajectory = []
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(joint_angle_trajectory) // self.ds_freq
elem = {}
elem['imgs'] = imgs
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)/100
elem['right_gripper'] = resample(right_gripper, n_samples)/100
elem['is_valid'] = int(np.linalg.norm(np.diff(elem['joint_angle_trajectory'],axis=0),axis=1).max() < 1.0)
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, split='all', shuffle=True):
dset = MIME_Img_Dataset(opts, split=split)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
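# A minimal usage sketch (assuming absl flags have been parsed, e.g. via app.run,
# that `opts` stands for flags.FLAGS, and that MIME_dir / MIME_imgs_dir point at
# local copies of the data):
#
#   loader = data_loader(opts, split='train')
#   for batch in loader:
#       imgs = batch['imgs']                    # (1, 3, img_h, img_w, C) start/middle/end frames
#       traj = batch['joint_angle_trajectory']  # (1, T, n_joints) downsampled joint angles
#       break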
| CausalSkillLearning-main | DataLoaders/MIME_Img_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
from . import MIME_DataLoader
opts = flags.FLAGS
def main(_):
dataset = MIME_DataLoader.MIME_Dataset(opts)
print("Created DataLoader.")
embed()
if __name__ == '__main__':
app.run(main) | CausalSkillLearning-main | DataLoaders/InteractiveDataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
import pdb
# flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
# flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
# flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
flags.DEFINE_enum('arm', 'both', ['left', 'right', 'both'], 'Which arms data to load')
class Plan_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts, split='all'):
self.opts = opts
self.split = split
self.dataset_directory = self.opts.MIME_dir
# # Must consider permutations of arm and split.
# Right Arm: New_Plans / Run*_EE_Plan
# / Run*_Joint_Plan
# / Run*_RG_Traj
# Left Arm: New_Plans_Left / Run*_EE_Plan
# / Run*_Joint_Plan
# / Run*_LG_traj
# Both Arms: Ambidextrous_Plans / Run*_EE_Plan
# / Run*_Joint_Plan
# / Run*_Grip_Traj
# Set these parameters to replace.
if self.opts.arm=='left':
folder = 'New_Plans'
gripper_suffix = "_LG_Traj"
elif self.opts.arm=='right':
folder = 'New_Plans_Left'
gripper_suffix = "_RG_Traj"
elif self.opts.arm=='both':
folder = 'Ambidextrous_Plans'
gripper_suffix = "_Grip_Traj"
# Default: /checkpoint/tanmayshankar/MIME/
if self.split=='all':
# Collect list of all EE Plans, we will select all Joint Angle Plans correspondingly.
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/New_Plans/Run*_EE_Plan.npy')
# Joint angle plans filelist is in same order thanks to glob.
self.jatext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/New_Plans/Run*_Joint_Plan.npy')
# Gripper plans filelist is in same order thanks to glob.
# self.rgtext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/New_Plans/Run*_RG_Traj.npy')
self.filelist = sorted(glob.glob(self.fulltext))
self.joint_filelist = sorted(glob.glob(self.jatext))
# self.gripper_filelist = sorted(glob.glob(self.rgtext))
elif self.split=='train':
self.filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanTrainList.npy"))
self.joint_filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanJointTrainList.npy"))
elif self.split=='val':
self.filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanValList.npy"))
self.joint_filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanJointValList.npy"))
elif self.split=='test':
self.filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanTestList.npy"))
self.joint_filelist = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Plan_Lists/PlanJointTestList.npy"))
# the loaded np arrays give byte strings, and not strings, which breaks later code
if not isinstance(self.filelist[0], str):
self.filelist = [f.decode() for f in self.filelist]
self.joint_filelist = [f.decode() for f in self.joint_filelist]
# Now replace terms in filelists based on what arm it is.
# The EE file list only needs folder replaced.
self.filelist = [f.replace("New_Plans",folder).replace('/checkpoint/tanmayshankar/MIME',self.opts.MIME_dir) for f in self.filelist]
# The Joint file list also only needs folder replaced.
self.joint_filelist = [f.replace("New_Plans",folder).replace('/checkpoint/tanmayshankar/MIME',self.opts.MIME_dir) for f in self.joint_filelist]
# Since we didn't create split lists for Gripper, use the filelist and replace to Gripper.
self.gripper_filelist = [f.replace("New_Plans",folder).replace("_EE_Plan",gripper_suffix).replace('/checkpoint/tanmayshankar/MIME',self.opts.MIME_dir) for f in self.filelist]
# Set joint names.
self.left_joint_names = ['left_s0','left_s1','left_e0','left_e1','left_w0','left_w1','left_w2']
self.right_joint_names = ['right_s0','right_s1','right_e0','right_e1','right_w0','right_w1','right_w2']
self.both_joint_names = self.left_joint_names+self.right_joint_names
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
file = self.filelist[index]
joint_file = self.joint_filelist[index]
gripper_file = self.gripper_filelist[index]
# Load items.
elem = {}
elem['EE_Plan'] = np.load(file)
elem['JA_Plan'] = np.load(joint_file)
elem['Grip_Plan'] = np.load(gripper_file)/100
return elem
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, split='all', shuffle=True):
dset = Plan_Dataset(opts, split=split)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
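# A minimal usage sketch (assuming absl flags are parsed and `opts` is flags.FLAGS):
#
#   dset = Plan_Dataset(opts, split='train')
#   elem = dset[0]
#   # elem['EE_Plan'], elem['JA_Plan'] and elem['Grip_Plan'] hold the end-effector,
#   # joint-angle and (rescaled) gripper plans for one run.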
| CausalSkillLearning-main | DataLoaders/Plan_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from headers import *
class GridWorldDataset(Dataset):
# Class implementing instance of dataset class for gridworld data.
def __init__(self, dataset_directory):
self.dataset_directory = dataset_directory
# For us, this is Research/Code/GraphPlanningNetworks/scripts/DatasetPlanning/CreateDemos/Demos2
self.action_map = np.array([[-1,0],[1,0],[0,-1],[0,1],[-1,-1],[-1,1],[1,-1],[1,1]])
## UP, DOWN, LEFT, RIGHT, UPLEFT, UPRIGHT, DOWNLEFT, DOWNRIGHT. ##
def __len__(self):
# Find out how many images we've stored.
filelist = glob.glob(os.path.join(self.dataset_directory,"*.png"))
return 4000
# return len(filelist)
def parse_trajectory_actions(self, coordinate_trajectory):
# Takes coordinate trajectory, returns action index taken.
state_diffs = np.diff(coordinate_trajectory,axis=0)
action_sequence = np.zeros((len(state_diffs)),dtype=int)
for i in range(len(state_diffs)):
for k in range(len(self.action_map)):
if (state_diffs[i]==self.action_map[k]).all():
action_sequence[i]=k
return action_sequence.astype(float)
def __getitem__(self, index):
# The getitem function must return a Map-Trajectory pair.
# We will handle per-timestep processes within our code.
# Assumes index is within range [0,len(filelist)-1]
image = np.load(os.path.join(self.dataset_directory,"Map{0}.npy".format(index)))
time_limit = 20
coordinate_trajectory = np.load(os.path.join(self.dataset_directory,"Map{0}_Traj1.npy".format(index))).astype(float)[:time_limit]
action_sequence = self.parse_trajectory_actions(coordinate_trajectory)
return image, coordinate_trajectory, action_sequence | CausalSkillLearning-main | DataLoaders/SmallMaps_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import random as stdlib_random, string
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from absl import flags, app
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from ..utils import plotting as plot_util
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_integer('n_segments_min', 4, 'Min Number of gt segments per trajectory')
flags.DEFINE_integer('n_segments_max', 4, 'Max number of gt segments per trajectory')
dirs_2d = np.array([
[1,0],
[0,1],
[-1,0],
[0,-1]
])
def vis_walk(walk):
'''
Args:
walk: (nT+1) X 2 array
Returns:
im: 200 X 200 X 4 numpy array
'''
t = walk.shape[0]
xs = walk[:,0]
ys = walk[:,1]
color_inds = np.linspace(0, 255, t).astype(np.int).tolist()
cs = plot_util.colormap[color_inds, :]
fig = plt.figure(figsize=(4, 4), dpi=50)
ax = fig.subplots()
ax.scatter(xs, ys, c=cs)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_aspect('equal', 'box')
ax.tick_params(
axis='x',
which='both',
bottom=False,
top=False,
labelbottom=False)
ax.tick_params(
axis='y',
which='both',
left=False,
right=False,
labelleft=False)
fig.tight_layout()
fname = '/tmp/' + ''.join(stdlib_random.choices(string.ascii_letters, k=8)) + '.png'
fig.savefig(fname)
plt.close(fig)
im = plt.imread(fname)
os.remove(fname)
return im
def walk_segment(origin, direction, n_steps=10, step_size=0.1, noise=0.02, rng=None):
'''
Args:
origin: nd numpy array
direction: nd numpy array with unit norm
n_steps: length of time seq
step_size: size of each step
noise: magintude of max actuation noise
Returns:
segment: n_steps X nd array
note that the first position in segment is different from origin
'''
if rng is None:
rng = np.random
nd = origin.shape[0]
segment = np.zeros((n_steps, nd)) + origin
segment += np.arange(1, n_steps+1).reshape((-1,1))*direction*step_size
segment += rng.uniform(low=-1, high=1, size=(n_steps, nd)) * noise/nd
return segment
def random_walk2d(origin, num_segments=4, rng=None):
'''
Args:
origin: 2d numpy array
num_segments: length of time seq
Returns:
walk: (nT+1) X 2 array
'''
if rng is None:
rng = np.random
dir_ind = rng.randint(4)
walk = origin.reshape(1,2)
seg_lengths = []
for s in range(num_segments):
seg_length = rng.randint(6,10)
seg_lengths.append(seg_length)
step_size = 0.1 + (rng.uniform() - 0.5)*0.05
segment = walk_segment(origin, dirs_2d[dir_ind], n_steps=seg_length, step_size=step_size, rng=rng)
origin = segment[-1]
walk = np.concatenate((walk, segment), axis=0)
dir_ind += 2 * rng.randint(2) -1
dir_ind = dir_ind % 4
return walk, seg_lengths
class RandomWalksDataset(Dataset):
def __init__(self, opts):
self.opts = opts
self.n_segments_min = self.opts.n_segments_min
self.n_segments_max = self.opts.n_segments_max
def __len__(self):
return int(1e6)
def __getitem__(self, ix):
rng = np.random.RandomState(ix)
ns = rng.randint(self.n_segments_min, self.n_segments_max+1)
trajectory, self.seg_lengths_ix = random_walk2d(np.zeros(2), num_segments=ns, rng=rng)
return trajectory
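# Because the RNG is seeded with the item index, every index deterministically
# maps to the same walk, so this (effectively unbounded) dataset is reproducible
# across epochs and across DataLoader workers.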
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, shuffle=True):
dset = RandomWalksDataset(opts)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
if __name__ == '__main__':
walk = random_walk2d(np.zeros(2), num_segments=4)
print(walk)
| CausalSkillLearning-main | DataLoaders/RandomWalks.py |
CausalSkillLearning-main | DataLoaders/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
# flags.DEFINE_boolean('downsampling', True, 'Whether to downsample trajectories. ')
flags.DEFINE_integer('ds_freq', 20, 'Downsample joint trajectories by this factor. Original recording rate = 100Hz')
flags.DEFINE_boolean('remote', False, 'Whether operating from a remote server or not.')
# opts = flags.FLAGS
def select_baxter_angles(trajectory, joint_names, arm='right'):
# joint names in order as used via mujoco visualizer
baxter_joint_names = ['right_s0', 'right_s1', 'right_e0', 'right_e1', 'right_w0', 'right_w1', 'right_w2', 'left_s0', 'left_s1', 'left_e0', 'left_e1', 'left_w0', 'left_w1', 'left_w2']
if arm == 'right':
select_joints = baxter_joint_names[:7]
elif arm == 'left':
select_joints = baxter_joint_names[7:]
elif arm == 'both':
select_joints = baxter_joint_names
inds = [joint_names.index(j) for j in select_joints]
return trajectory[:, inds]
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class MIME_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts, split='all'):
self.dataset_directory = opts.MIME_dir
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
if opts.remote:
self.suff_filelist = np.load(osp.join(self.dataset_directory,"Suffix_Filelist.npy"))
self.filelist = []
for j in range(len(self.suff_filelist)):
self.filelist.append(osp.join(self.dataset_directory,self.suff_filelist[j]))
else:
self.filelist = glob.glob(self.fulltext)
self.ds_freq = opts.ds_freq
with open(self.filelist[0], 'r') as file:
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
if split == 'all':
self.filelist = self.filelist
else:
self.task_lists = np.load(os.path.join(
self.dataset_directory, 'MIME_jointangles/{}_Lists.npy'.format(split.capitalize())))
self.filelist = []
for i in range(20):
self.filelist.extend(self.task_lists[i])
self.filelist = [f.replace('/checkpoint/tanmayshankar/MIME/', opts.MIME_dir) for f in self.filelist]
# print(len(self.filelist))
def __len__(self):
# Return length of file list.
return len(self.filelist)
def __getitem__(self, index):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)/100
elem['right_gripper'] = resample(right_gripper, n_samples)/100
elem['path_prefix'] = os.path.split(self.filelist[index])[0]
elem['ra_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='right')
elem['la_trajectory'] = select_baxter_angles(elem['joint_angle_trajectory'], self.joint_names, arm='left')
# If max norm of differences is <1.0, valid.
elem['is_valid'] = int(np.linalg.norm(np.diff(elem['joint_angle_trajectory'],axis=0),axis=1).max() < 1.0)
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, split='all', shuffle=True):
dset = MIME_Dataset(opts, split=split)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
| CausalSkillLearning-main | DataLoaders/MIME_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
A convenience script to playback random demonstrations from
a set of demonstrations stored in a hdf5 file.
Arguments:
--folder (str): Path to demonstrations
--use_actions (optional): If this flag is provided, the actions are played back
through the MuJoCo simulator, instead of loading the simulator states
one by one.
Example:
$ python playback_demonstrations_from_hdf5.py --folder ../models/assets/demonstrations/SawyerPickPlace/
"""
import os
import h5py
import argparse
import random
import numpy as np
import robosuite
from robosuite.utils.mjcf_utils import postprocess_model_xml
from IPython import embed
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--folder",
type=str,
default=os.path.join(
robosuite.models.assets_root, "demonstrations/SawyerNutAssembly"
),
)
parser.add_argument(
"--use-actions",
action='store_true',
)
args = parser.parse_args()
demo_path = args.folder
hdf5_path = os.path.join(demo_path, "demo.hdf5")
f = h5py.File(hdf5_path, "r")
env_name = f["data"].attrs["env"]
env = robosuite.make(
env_name,
has_renderer=False,
# has_renderer=True,
ignore_done=True,
use_camera_obs=False,
gripper_visualization=True,
reward_shaping=True,
control_freq=100,
)
# list of all demonstrations episodes
demos = list(f["data"].keys())
while True:
print("Playing back random episode... (press ESC to quit)")
# # select an episode randomly
ep = random.choice(demos)
# read the model xml, using the metadata stored in the attribute for this episode
model_file = f["data/{}".format(ep)].attrs["model_file"]
model_path = os.path.join(demo_path, "models", model_file)
with open(model_path, "r") as model_f:
model_xml = model_f.read()
env.reset()
xml = postprocess_model_xml(model_xml)
env.reset_from_xml_string(xml)
env.sim.reset()
# env.viewer.set_camera(0)
# load the flattened mujoco states
states = f["data/{}/states".format(ep)].value
if args.use_actions:
# load the initial state
env.sim.set_state_from_flattened(states[0])
env.sim.forward()
# load the actions and play them back open-loop
jvels = f["data/{}/joint_velocities".format(ep)].value
grip_acts = f["data/{}/gripper_actuations".format(ep)].value
actions = np.concatenate([jvels, grip_acts], axis=1)
num_actions = actions.shape[0]
for j, action in enumerate(actions):
env.step(action)
# env.render()
if j < num_actions - 1:
# ensure that the actions deterministically lead to the same recorded states
state_playback = env.sim.get_state().flatten()
embed()
assert(np.all(np.equal(states[j + 1], state_playback)))
else:
print("Embedding in not use actions branch")
embed()
# force the sequence of internal mujoco states one by one
for state in states:
env.sim.set_state_from_flattened(state)
env.sim.forward()
# env.render()
f.close() | CausalSkillLearning-main | DataLoaders/RoboturkeExp.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .headers import *
import os.path as osp
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('lang_dir', '/private/home/shubhtuls/code/sfd/cachedir/data/lang/', 'Data Directory')
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
def readLangs(data_dir, lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open(osp.join(data_dir, '%s-%s.txt' % (lang1, lang2)), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s ",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH
# and \
# p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
def prepareData(data_dir, lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(data_dir, lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
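# tensorFromSentence maps a normalized sentence to a column LongTensor of word
# indices with EOS (index 1) appended, i.e. shape (n_words + 1, 1), ready to be
# fed to a seq2seq encoder one token at a time.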
class TranslationDataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts):
self.dataset_directory = opts.lang_dir
self.l1, self.l2, self.pairs = prepareData(self.dataset_directory, 'eng', 'fra', reverse=False)
def __len__(self):
# Return the number of sentence pairs (Lang objects do not define __len__).
return len(self.pairs)
def tensorsFromPair(self, pair):
input_tensor = tensorFromSentence(self.l1, pair[0])
target_tensor = tensorFromSentence(self.l2, pair[1])
return (input_tensor, target_tensor)
def __getitem__(self, index):
elem = {}
elem['pair'] = self.pairs[index]
elem['l1'], elem['l2'] = self.tensorsFromPair(elem['pair'])
return elem | CausalSkillLearning-main | DataLoaders/Translation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import glob, cv2, os
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from absl import flags
from IPython import embed
from absl import flags, app | CausalSkillLearning-main | DataLoaders/headers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .headers import *
import os.path as osp
flags.DEFINE_integer('n_data_workers', 4, 'Number of data loading workers')
flags.DEFINE_integer('batch_size', 1, 'Batch size. Code currently only handles bs=1')
flags.DEFINE_string('MIME_dir', '/checkpoint/tanmayshankar/MIME/', 'Data Directory')
# flags.DEFINE_boolean('downsampling', True, 'Whether to downsample trajectories. ')
flags.DEFINE_integer('ds_freq', 20, 'Downsample joint trajectories by this factor. Original recording rate = 100Hz')
flags.DEFINE_boolean('remote', False, 'Whether operating from a remote server or not.')
# opts = flags.FLAGS
def resample(original_trajectory, desired_number_timepoints):
original_traj_len = len(original_trajectory)
new_timepoints = np.linspace(0, original_traj_len-1, desired_number_timepoints, dtype=int)
return original_trajectory[new_timepoints]
class MIME_Dataset(Dataset):
'''
Class implementing instance of dataset class for MIME data.
'''
def __init__(self, opts):
self.dataset_directory = opts.MIME_dir
# Default: /checkpoint/tanmayshankar/MIME/
self.fulltext = osp.join(self.dataset_directory, 'MIME_jointangles/*/*/joint_angles.txt')
if opts.remote:
self.suff_filelist = np.load(osp.join(self.dataset_directory,"Suffix_Filelist.npy"))
self.filelist = []
for j in range(len(self.suff_filelist)):
self.filelist.append(osp.join(self.dataset_directory,self.suff_filelist[j]))
else:
self.filelist = sorted(glob.glob(self.fulltext))
self.ds_freq = opts.ds_freq
with open(self.filelist[0], 'r') as file:
print(self.filelist[0])
lines = file.readlines()
self.joint_names = sorted(eval(lines[0].rstrip('\n')).keys())
self.train_lists = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Train_Lists.npy"))
self.val_lists = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Val_Lists.npy"))
self.test_lists = np.load(os.path.join(self.dataset_directory,"MIME_jointangles/Test_Lists.npy"))
def __len__(self):
# Return length of file list.
return len(self.filelist)
def setup_splits(self):
self.train_filelist = []
self.val_filelist = []
self.test_filelist = []
for i in range(20):
self.train_filelist.extend(self.train_lists[i])
self.val_filelist.extend(self.val_lists[i])
self.test_filelist.extend(self.test_lists[i])
def getit(self, index, split=None, return_plan_run=None):
'''
# Returns Joint Angles as:
# List of length Number_Timesteps, with each element of the list a dictionary containing the sequence of joint angles.
# Assumes index is within range [0,len(filelist)-1]
'''
if split=="train":
file = self.train_filelist[index]
elif split=="val":
file = self.val_filelist[index]
elif split=="test":
file = self.test_filelist[index]
elif split is None:
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
folder = "New_Plans"
ee_plan, ja_plan = None, None
if return_plan_run is not None:
ee_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_EE_Plan.npy".format(folder,return_plan_run)))
ja_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_Joint_Plan.npy".format(folder,return_plan_run)))
# Open file.
with open(file, 'r') as file:
lines = file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)
elem['right_gripper'] = resample(right_gripper, n_samples)
elem['path_prefix'] = os.path.split(file)[0]
# Only attach plans when a specific plan run was requested; otherwise ja_plan / ee_plan are undefined.
if return_plan_run is not None:
elem['JA_Plan'] = ja_plan
elem['EE_Plan'] = ee_plan
return elem
def __getitem__(self, index, split=None, return_plan_run=None):
# def __getitem__(self, inputs):
'''
Returns a dictionary of resampled joint-angle, end-effector, and gripper trajectories for the demonstration at `index`.
The underlying joint_angles.txt file stores one dictionary of joint angles per timestep.
Assumes index is within range [0, len(filelist)-1].
'''
if split=="train":
file = self.train_filelist[index]
elif split=="val":
file = self.val_filelist[index]
elif split=="test":
file = self.test_filelist[index]
elif split is None:
file = self.filelist[index]
left_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'left_gripper.txt'))
right_gripper = np.loadtxt(os.path.join(os.path.split(file)[0],'right_gripper.txt'))
orig_left_traj = np.load(osp.join(osp.split(file)[0], 'Left_EE.npy'))
orig_right_traj = np.load(osp.join(osp.split(file)[0], 'Right_EE.npy'))
joint_angle_trajectory = []
folder = "New_Plans"
if return_plan_run is not None:
ee_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_EE_Plan.npy".format(folder,return_plan_run)))
ja_plan = np.load(os.path.join(os.path.split(file)[0],"{0}/Run{1}_JA_Plan.npy".format(folder,return_plan_run)))
# Open the joint angle file (use a separate handle name so the path in `file` is not shadowed).
with open(file, 'r') as joint_angle_file:
lines = joint_angle_file.readlines()
for line in lines:
dict_element = eval(line.rstrip('\n'))
if len(dict_element.keys()) == len(self.joint_names):
# some files have extra lines with gripper keys e.g. MIME_jointangles/4/12405Nov19/joint_angles.txt
array_element = np.array([dict_element[joint] for joint in self.joint_names])
joint_angle_trajectory.append(array_element)
joint_angle_trajectory = np.array(joint_angle_trajectory)
n_samples = len(orig_left_traj) // self.ds_freq
elem = {}
elem['joint_angle_trajectory'] = resample(joint_angle_trajectory, n_samples)
elem['left_trajectory'] = resample(orig_left_traj, n_samples)
elem['right_trajectory'] = resample(orig_right_traj, n_samples)
elem['left_gripper'] = resample(left_gripper, n_samples)
elem['right_gripper'] = resample(right_gripper, n_samples)
elem['path_prefix'] = os.path.split(file)[0]
# Only attach plans when a specific plan run was requested; otherwise ja_plan / ee_plan are undefined.
if return_plan_run is not None:
elem['JA_Plan'] = ja_plan
elem['EE_Plan'] = ee_plan
return elem
def recreate_dictionary(self, arm, joint_angles):
if arm=="left":
offset = 2
width = 7
elif arm=="right":
offset = 9
width = 7
elif arm=="full":
offset = 0
width = len(self.joint_names)
return dict((self.joint_names[i],joint_angles[i-offset]) for i in range(offset,offset+width))
# ------------ Data Loader ----------- #
# ------------------------------------ #
def data_loader(opts, shuffle=True):
dset = MIME_Dataset(opts)
return DataLoader(
dset,
batch_size=opts.batch_size,
shuffle=shuffle,
num_workers=opts.n_data_workers,
drop_last=True)
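# A minimal usage sketch with hypothetical option values (only the fields read by MIME_Dataset and
# data_loader are filled in; ds_freq, batch_size, and n_data_workers here are illustrative, not the project defaults):
if __name__ == '__main__':
    import argparse
    _opts = argparse.Namespace(MIME_dir='/checkpoint/tanmayshankar/MIME/', remote=False,
                               ds_freq=20, batch_size=1, n_data_workers=0)
    _loader = data_loader(_opts, shuffle=True)
    for _batch in _loader:
        print(_batch['joint_angle_trajectory'].shape)
        break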
| CausalSkillLearning-main | DataLoaders/MIMEandPlan_DataLoader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
# For both arms and grippers.
python -m SkillsfromDemonstrations.Experiments.UseSkillsRL.TrainZPolicyRL --train --transformer --nz=64 --nh=64 --variable_nseg=False --network_dir=saved_models/T356_fnseg_vae_sl2pt0_kldwt0pt002_finetune --variable_ns=False --st_space=joint_both_gripper --vae_enc
"""
from __future__ import absolute_import
import os, sys, torch
import matplotlib.pyplot as plt
from ...DataLoaders import MIME_DataLoader
from ..abstraction import mime_eval
from ..abstraction.abstraction_utils import ScoreFunctionEstimator
from .PolicyNet import PolicyNetwork, PolicyNetworkSingleTimestep, AltPolicyNetworkSingleTimestep
from absl import app, flags
import imageio, numpy as np, copy, os, shutil
from IPython import embed
import robosuite
import tensorboard, tensorboardX
flags.DEFINE_boolean('train',False,'Whether to run train.')
flags.DEFINE_boolean('debug',False,'Whether to debug.')
# flags.DEFINE_float('sf_loss_wt', 0.1, 'Weight of pseudo loss for SF estimator')
# flags.DEFINE_float('kld_loss_wt', 0, 'Weight for KL Divergence loss if using VAE encoder.')
flags.DEFINE_float('reinforce_loss_wt', 1., 'Weight for primary reinforce loss.')
# flags.DEFINE_string('name',None,'Name to give run.')
class ZPolicyTrainer(object):
def __init__(self, opts):
self.opts = opts
self.input_size = self.opts.n_state
self.zpolicy_input_size = 85
self.hidden_size = 20
self.output_size = self.opts.nz
self.primitive_length = 10
self.learning_rate = 1e-4
self.number_epochs = 200
self.number_episodes = 500
self.save_every_epoch = 5
self.maximum_skills = 6
def initialize_plots(self):
self.log_dir = os.path.join("SkillsfromDemonstrations/cachedir/logs/RL",self.opts.name)
if not(os.path.isdir(self.log_dir)):
os.mkdir(self.log_dir)
self.writer = tensorboardX.SummaryWriter(self.log_dir)
def setup_networks(self):
# Set up evaluator to load mime model and stuff.
self.evaluator = mime_eval.PrimitiveDiscoverEvaluator(self.opts)
self.evaluator.setup_testing(split='val')
# Also create a ZPolicy.
# self.z_policy = PolicyNetworkSingleTimestep(opts=self.opts, input_size=self.zpolicy_input_size, hidden_size=self.hidden_size, output_size=self.output_size).cuda()
self.z_policy = AltPolicyNetworkSingleTimestep(opts=self.opts, input_size=self.zpolicy_input_size, hidden_size=self.hidden_size, output_size=self.output_size).cuda()
if self.opts.variable_nseg:
self.sf_loss_fn = ScoreFunctionEstimator()
# Creating optimizer.
self.z_policy_optimizer = torch.optim.Adam(self.z_policy.parameters(), lr=self.learning_rate)
def load_network(self, network_dir):
# Load the evaluator networks (Abstraction network and skill network)
self.evaluator.load_network(self.evaluator.model, 'pred', 'latest', network_dir=network_dir)
# Freeze parameters of the IntendedTrajectoryPredictorModel.
for parameter in self.evaluator.model.parameters():
parameter.requires_grad = False
def save_zpolicy_model(self, path, suffix):
if not(os.path.isdir(path)):
os.mkdir(path)
save_object = {}
save_object['ZPolicy'] = self.z_policy.state_dict()
torch.save(save_object,os.path.join(path,"ZPolicyModel"+suffix))
def load_all_models(self, path):
load_object = torch.load(path)
self.z_policy.load_state_dict(load_object['ZPolicy'])
# def update_plots(self, counter, sample_map, loglikelihood):
def update_plots(self, counter):
if self.opts.variable_nseg:
self.writer.add_scalar('Stop_Prob_Reinforce_Loss', torch.mean(self.stop_prob_reinforce_loss), counter)
self.writer.add_scalar('Predicted_Zs_Reinforce_Loss', torch.mean(self.reinforce_predicted_Zs), counter)
self.writer.add_scalar('KL_Divergence_Loss', torch.mean(self.kld_loss_seq), counter)
self.writer.add_scalar('Total_Loss', torch.mean(self.total_loss), counter)
def assemble_input(self, trajectory):
traj_start = trajectory[0]
traj_end = trajectory[-1]
return torch.cat([torch.tensor(traj_start).cuda(),torch.tensor(traj_end).cuda()],dim=0)
# def update_networks(self, state_traj, reward_traj, predicted_Zs):
def update_networks(self, state_traj_torch, reward_traj, latent_z_seq, log_prob_seq, stop_prob_seq, stop_seq, kld_loss_seq):
# embed()
# Get cumulative rewards corresponding to actions executed after selecting a particular Z.
# This is basically adding up the rewards from the end of the array.
# cumm_reward_to_go = torch.cumsum(torch.tensor(reward_traj[::-1]).cuda().float())[::-1]
cumm_reward_to_go_numpy = copy.deepcopy(np.cumsum(copy.deepcopy(reward_traj[::-1]))[::-1])
cumm_reward_to_go = torch.tensor(cumm_reward_to_go_numpy).cuda().float()
self.total_loss = 0.
if self.opts.variable_nseg:
# Remember, this stop-probability loss is for deciding when to stop predicting Z's, NOT for intermediate timesteps,
# so we still use cumm_reward_to_go rather than a per-timestep reward-to-go array.
self.stop_prob_reinforce_loss = self.sf_loss_fn.forward(cumm_reward_to_go, stop_prob_seq.unsqueeze(1), stop_seq.long())
# Add reinforce loss and loss value.
self.total_loss += self.opts.sf_loss_wt*self.stop_prob_reinforce_loss
# Now adding the reinforce loss associated with predicted Zs.
# (Remember, we want to maximize reward times log prob, so multiply by -1 to minimize.)
self.reinforce_predicted_Zs = (self.opts.reinforce_loss_wt * -1. * cumm_reward_to_go*log_prob_seq.view(-1)).sum()
self.total_loss += self.reinforce_predicted_Zs
# Add loss term with KL Divergence between 0 mean Gaussian and predicted Zs.
self.kld_loss_seq = kld_loss_seq
self.total_loss += self.opts.kld_loss_wt*self.kld_loss_seq[0]
# Zero gradients of optimizer, compute backward, then step optimizer.
self.z_policy_optimizer.zero_grad()
self.total_loss.sum().backward()
self.z_policy_optimizer.step()
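# A standalone sketch (hypothetical numbers, kept as a comment so it stays out of the class) of the
# credit assignment used in update_networks: each predicted Z is weighted by the sum of segment
# rewards from its own segment onwards, and the REINFORCE term is -reward_to_go * log_prob.
#
#   import numpy as np, torch
#   reward_traj = np.array([1., 0., 2., 3.])                       # per-segment rewards for 4 predicted Zs
#   reward_to_go = np.cumsum(reward_traj[::-1])[::-1]              # -> [6., 5., 5., 3.]
#   log_prob_seq = torch.randn(4, requires_grad=True)              # stand-in for the policy log-probabilities
#   reinforce_loss = (-torch.tensor(reward_to_go.copy()).float() * log_prob_seq).sum()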
def reorder_actions(self, actions):
# Assume that the actions are 16 dimensional, and are ordered as:
# 7 DoF for left arm, 7 DoF for right arm, 1 for left gripper, and 1 for right gripper.
# The original trajectory has gripper values in [0, 1] (0 = Close, 1 = Open); Mujoco expects values
# in [-1, 1] with the convention flipped (-1 = Open, 1 = Close), so the gripper channels are rescaled below.
# MIME gripper values are 0-100 (Close to Open), but the actions here are assumed to already lie in [0, 1].
indices = np.array([ 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 15, 14])
reordered_actions = actions[:,indices]
reordered_actions[:,14:] = 1 - 2*reordered_actions[:,14:]
return reordered_actions
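# A worked example (hypothetical values) of the reordering above: right-arm joints move to the front,
# left-arm joints follow, and the two gripper channels are swapped and rescaled from [0, 1] to [1, -1].
#
#   actions = np.concatenate([np.full(7, 0.1), np.full(7, 0.2), [0., 1.]]).reshape(1, 16)
#   reordered = self.reorder_actions(actions)
#   reordered[0, :7]   -> all 0.2   (right-arm dimensions now lead)
#   reordered[0, 14:]  -> [-1., 1.] (open gripper 1 -> -1, closed gripper 0 -> 1)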
def run_episode(self, counter):
# For number of epochs:
# # 1) Given start and goal (for reaching task, say)
# # 2) Run Z_Policy on start and goal to retrieve predicted Zs.
# # 3) Decode predicted Zs into trajectory.
# # 4) Retrieve "actions" from trajectory.
# # 5) Feed "actions" into RL environment and collect reward.
# # 6) Train ZPolicy to maximize cumulative reward with your favorite RL algorithm.
# Reset environment.
state = self.environment.reset()
terminal = False
reward_traj = None
state_traj_torch = None
t_out = 0
stop = False
hidden = None
latent_z_seq = None
stop_prob_seq = None
stop_seq = None
log_prob_seq = None
kld_loss_seq = 0.
previous_state = None
while terminal==False and stop==False:
########################################################
######## 1) Collect input for first timestep. ##########
########################################################
zpolicy_input = np.concatenate([state['robot-state'],state['object-state']]).reshape(1,self.zpolicy_input_size)
########################################################
# 2) Feed into the Z policy to retrieve the predicted Z.
########################################################
latent_z, stop_probability, stop, log_prob, kld_loss, hidden = self.z_policy.forward(zpolicy_input, hidden=hidden)
latent_z = latent_z.squeeze(1)
########################################################
############## 3) Decode into trajectory. ##############
########################################################
primitive_and_skill_stop_prob = self.evaluator.model.primitive_decoder(latent_z)
traj_seg = primitive_and_skill_stop_prob[0].squeeze(1).detach().cpu().numpy()
if previous_state is None:
previous_state = traj_seg[-1].reshape(1,self.opts.n_state)
else:
# Concatenate previous state to trajectory, so that when we take actions we get an action from previous segment to the current one.
traj_seg = np.concatenate([previous_state,traj_seg],axis=0)
previous_state = traj_seg[-1].reshape(-1,self.opts.n_state)
########################################################
## 4) Finite diff along time axis to retrieve actions ##
########################################################
actions = np.diff(traj_seg,axis=0)
actions = self.reorder_actions(actions)
actions_torch = torch.tensor(actions).cuda().float()
cummulative_reward_in_segment = 0.
# Step through the environment for all actions in this segment.
t = 0
while t<actions_torch.shape[0] and terminal==False:
# Step.
state, onestep_reward, terminal, success = self.environment.step(actions[t])
# Collect onestep_rewards within this segment.
cummulative_reward_in_segment += float(onestep_reward)
# Assuming we have fixed_ns (i.e. novariable_ns), we can use the fixed decoding length of primitives to assign cumulative reward-to-go values to the various predicted Z variables.
# (This is also why we need the reward history, and not just the cumulative rewards obtained over the course of training.)
t+=1
# On the first outer step the accumulators are still None, so initialise them; afterwards, concatenate.
if t_out==0:
state_traj_torch = torch.tensor(zpolicy_input).cuda().float().view(-1,self.zpolicy_input_size)
latent_z_seq = latent_z.view(-1,self.opts.nz)
stop_seq = stop.clone().detach().view(-1,1)
stop_prob_seq = stop_probability.view(-1,2)
log_prob_seq = log_prob.view(-1,1)
# reward_traj = torch.tensor(copy.deepcopy(cummulative_reward_in_segment)).cuda().float().view(-1,1)
reward_traj = np.array(cummulative_reward_in_segment).reshape((1,1))
else:
state_traj_torch = torch.cat([state_traj_torch, torch.tensor(zpolicy_input).cuda().float().view(-1,self.zpolicy_input_size)],dim=0)
latent_z_seq = torch.cat([latent_z_seq, latent_z.view(-1,self.opts.nz)], dim=0)
stop_seq = torch.cat([stop_seq, stop.view(-1,1)], dim=0)
stop_prob_seq = torch.cat([stop_prob_seq, stop_probability.view(-1,2)], dim=0)
log_prob_seq = torch.cat([log_prob_seq, log_prob.view(-1,1)], dim=0)
# reward_traj = torch.cat([reward_traj.view(-1,1), torch.tensor(copy.deepcopy(cummulative_reward_in_segment)).cuda().float().view(-1,1)])
reward_traj = np.concatenate([reward_traj, np.array(cummulative_reward_in_segment).reshape((1,1))], axis=0)
# Either way:
kld_loss_seq += kld_loss
t_out += 1
# print(t_out)
# Set to false by default.
if self.opts.variable_nseg==False:
stop = False
if t_out>=self.maximum_skills:
stop = True
# if self.opts.debug==True:
# embed()
if self.opts.train:
# 6) Feed states, actions, reward, and predicted Zs to update. (These are all lists of tensors.)
# self.update_networks(state_traj_torch, action_torch, reward_traj, latent_zs)
self.update_networks(state_traj_torch, reward_traj, latent_z_seq, log_prob_seq, stop_prob_seq, stop_seq, kld_loss_seq)
self.update_plots(counter)
def setup_RL_environment(self, has_display=False):
# Create Mujoco environment.
self.environment = robosuite.make("BaxterLift", has_renderer=has_display)
self.initialize_plots()
def trainRL(self):
# Basic function to train.
counter = 0
for e in range(self.number_epochs):
# Number of episodes per epoch.
for i in range(self.number_episodes):
print("#########################################")
print("Epoch: ",e,"Traj: ",i)
# Run an episode.
self.run_episode(counter)
counter += 1
if self.opts.train and e%self.save_every_epoch==0:
self.save_zpolicy_model(os.path.join("saved_models/RL",self.opts.name), "epoch{0}".format(e))
def main(_):
# This is only to be executed for notebooks.
# flags.FLAGS([''])
opts = flags.FLAGS
# Set state space.
if opts.st_space == 'ee_r' or opts.st_space == 'ee_l':
opts.n_state = 7
if opts.st_space == 'joint_ra' or opts.st_space == 'joint_la':
opts.n_state = 7
if opts.st_space == 'joint_both':
opts.n_state = 14
elif opts.st_space == 'ee_all':
opts.n_state = 14
elif opts.st_space == 'joint':
opts.n_state = 17
elif opts.st_space =='joint_both_gripper':
opts.n_state = 16
opts.logging_dir = os.path.join(opts.logging_dir, 'mime')
opts.transformer = True
torch.manual_seed(0)
# Create instance of class.
zpolicy_trainer = ZPolicyTrainer(opts)
zpolicy_trainer.setup_networks()
zpolicy_trainer.setup_RL_environment()
# Still need this to load primitive decoder network.
zpolicy_trainer.load_network(opts.network_dir)
zpolicy_trainer.trainRL()
if __name__ == '__main__':
app.run(main)
| CausalSkillLearning-main | DownstreamRL/TrainZPolicyRL.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ..SkillNetwork.headers import *
from ..SkillNetwork.LSTMNetwork import LSTMNetwork, LSTMNetwork_Fixed
class PolicyNetwork(torch.nn.Module):
def __init__(self, opts, input_size, hidden_size, output_size, fixed=True):
super(PolicyNetwork, self).__init__()
self.opts = opts
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
if fixed:
self.lstmnet = LSTMNetwork_Fixed(input_size=input_size, hidden_size=hidden_size, output_size=output_size).cuda()
else:
self.lstmnet = LSTMNetwork(input_size=input_size, hidden_size=hidden_size, output_size=output_size).cuda()
# Create linear layer to split prediction into mu and sigma.
self.mu_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.sig_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
# Stopping probability predictor. (Softmax, not sigmoid)
self.stopping_probability_layer = torch.nn.Linear(self.hidden_size, 2)
self.softmax_layer = torch.nn.Softmax(dim=-1)
def forward(self, input):
format_input = torch.tensor(input).view(1,1,self.input_size).cuda().float()
predicted_Z_preparam, stop_probabilities = self.lstmnet.forward(format_input)
predicted_Z_preparam = predicted_Z_preparam.squeeze(1)
self.latent_z_seq = []
self.latent_mu_seq = []
self.latent_log_sigma_seq = []
self.kld_loss = 0.
t = 0
# Remember, the policy is Gaussian (so we can implement VAE-KLD on it).
latent_z_mu_seq = self.mu_linear_layer(predicted_Z_preparam)
latent_z_log_sig_seq = self.sig_linear_layer(predicted_Z_preparam)
# Compute standard deviation.
std = torch.exp(0.5*latent_z_log_sig_seq).cuda()
# Sample random variable.
eps = torch.randn_like(std).cuda()
self.latent_z_seq = latent_z_mu_seq+eps*std
# Compute KL Divergence Loss term here, so we don't have to return mu's and sigma's.
self.kld_loss = torch.zeros(1).cuda()
for t in range(latent_z_mu_seq.shape[0]):
# Taken from mime_plan_skill.py Line 159 - KL Divergence for Gaussian prior and Gaussian prediction.
self.kld_loss += -0.5 * torch.sum(1. + latent_z_log_sig_seq[t] - latent_z_mu_seq[t].pow(2) - latent_z_log_sig_seq[t].exp())
# Create distributions so that we can evaluate log probability.
self.dists = [torch.distributions.MultivariateNormal(loc = latent_z_mu_seq[t], covariance_matrix = std[t]*torch.eye((self.opts.nz)).cuda()) for t in range(latent_z_mu_seq.shape[0])]
# Evaluate log probability in forward so we don't have to do it elsewhere.
self.log_probs = [self.dists[i].log_prob(self.latent_z_seq[i]) for i in range(self.latent_z_seq.shape[0])]
return self.latent_z_seq, stop_probabilities
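# A standalone sketch (CPU tensors, hypothetical sizes) of the reparameterisation and analytic KL
# divergence used in PolicyNetwork.forward above; note the "log sig" head is effectively treated as
# a log-variance, since std = exp(0.5 * log_sig).
#
#   import torch
#   nz = 64
#   mu, log_var = torch.randn(1, nz), torch.randn(1, nz)
#   std = torch.exp(0.5 * log_var)
#   z = mu + torch.randn_like(std) * std                                 # differentiable sample
#   kld = -0.5 * torch.sum(1. + log_var - mu.pow(2) - log_var.exp())     # KL(N(mu, var) || N(0, I))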
class PolicyNetworkSingleTimestep(torch.nn.Module):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, opts, input_size, hidden_size, output_size):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(PolicyNetworkSingleTimestep, self).__init__()
self.opts = opts
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = 4
self.maximum_length = 15
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
# Create linear layer to split prediction into mu and sigma.
self.mu_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.sig_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
# Stopping probability predictor. (Softmax, not sigmoid)
self.stopping_probability_layer = torch.nn.Linear(self.hidden_size, 2)
self.softmax_layer = torch.nn.Softmax(dim=-1)
self.logsoftmax_layer = torch.nn.LogSoftmax(dim=-1)
def forward(self, input, hidden=None):
# Input format must be: Sequence_Length x 1 x Input_Size.
# Assuming input is a numpy array.
format_input = torch.tensor(input).view(input.shape[0],1,self.input_size).cuda().float()
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input, hidden)
# Predict parameters
latentz_preparam = self.output_layer(outputs[-1])
# Remember, the policy is Gaussian (so we can implement VAE-KLD on it).
latent_z_mu = self.mu_linear_layer(latentz_preparam)
latent_z_log_sig = self.sig_linear_layer(latentz_preparam)
# Predict stop probability.
preact_stop_probs = self.stopping_probability_layer(outputs[-1])
stop_probability = self.softmax_layer(preact_stop_probs)
stop = self.sample_action(stop_probability)
# Compute standard deviation.
std = torch.exp(0.5*latent_z_log_sig).cuda()
# Sample random variable.
eps = torch.randn_like(std).cuda()
latent_z = latent_z_mu+eps*std
# Compute KL Divergence Loss term here, so we don't have to return mu's and sigma's.
# Taken from mime_plan_skill.py Line 159 - KL Divergence for Gaussian prior and Gaussian prediction.
kld_loss = -0.5 * torch.sum(1. + latent_z_log_sig - latent_z_mu.pow(2) - latent_z_log_sig.exp())
# Create distributions so that we can evaluate log probability.
dist = torch.distributions.MultivariateNormal(loc = latent_z_mu, covariance_matrix = std*torch.eye((self.opts.nz)).cuda())
# Evaluate log probability in forward so we don't have to do it elsewhere.
log_prob = dist.log_prob(latent_z)
return latent_z, stop_probability, stop, log_prob, kld_loss, hidden
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample().squeeze(0)
return sample_action
class AltPolicyNetworkSingleTimestep(torch.nn.Module):
# Policy Network inherits from torch.nn.Module.
# Now we overwrite the init, forward functions. And define anything else that we need.
def __init__(self, opts, input_size, hidden_size, output_size):
# Ensures inheriting from torch.nn.Module goes nicely and cleanly.
super(AltPolicyNetworkSingleTimestep, self).__init__()
self.opts = opts
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = 4
self.maximum_length = 15
# Define a bidirectional LSTM now.
self.lstm = torch.nn.LSTM(input_size=self.input_size,hidden_size=self.hidden_size,num_layers=self.num_layers)
# Define output layers for the LSTM, and activations for this output layer.
self.output_layer = torch.nn.Linear(self.hidden_size, self.output_size)
# Create linear layer to split prediction into mu and sigma.
self.mu_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.sig_linear_layer = torch.nn.Linear(self.opts.nz, self.opts.nz)
self.softplus_activation_layer = torch.nn.Softplus()
# Stopping probability predictor. (Softmax, not sigmoid)
self.stopping_probability_layer = torch.nn.Linear(self.hidden_size, 2)
self.softmax_layer = torch.nn.Softmax(dim=-1)
self.logsoftmax_layer = torch.nn.LogSoftmax(dim=-1)
def forward(self, input, hidden=None):
# Input format must be: Sequence_Length x 1 x Input_Size.
# Assuming input is a numpy array.
format_input = torch.tensor(input).view(input.shape[0],1,self.input_size).cuda().float()
# Instead of iterating over time and passing each timestep's input to the LSTM, we can now just pass the entire input sequence.
outputs, hidden = self.lstm(format_input, hidden)
# Predict parameters
latentz_preparam = self.output_layer(outputs[-1])
# Remember, the policy is Gaussian (so we can implement VAE-KLD on it).
latent_z_mu = self.mu_linear_layer(latentz_preparam)
latent_z_sig = self.softplus_activation_layer(self.sig_linear_layer(latentz_preparam))
# Predict stop probability.
preact_stop_probs = self.stopping_probability_layer(outputs[-1])
stop_probability = self.softmax_layer(preact_stop_probs)
stop = self.sample_action(stop_probability)
# Create distributions so that we can evaluate log probability.
dist = torch.distributions.MultivariateNormal(loc = latent_z_mu, covariance_matrix = torch.diag_embed(latent_z_sig))
latent_z = dist.sample()
# Evaluate log probability in forward so we don't have to do it elsewhere.
log_prob = dist.log_prob(latent_z)
# Set standard distribution for KL.
standard_distribution = torch.distributions.MultivariateNormal(torch.zeros((self.output_size)).cuda(),torch.eye((self.output_size)).cuda())
# Compute KL.
kl_divergence = torch.distributions.kl_divergence(dist, standard_distribution)
return latent_z, stop_probability, stop, log_prob, kl_divergence, hidden
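# A standalone sketch (CPU tensors, hypothetical sizes) of the distribution machinery used in
# forward() above: a diagonal-covariance Gaussian, its log-probability, and its KL divergence to a
# standard normal via torch.distributions.
#
#   import torch
#   mu = torch.zeros(1, 64)
#   sig = torch.nn.functional.softplus(torch.randn(1, 64))               # positive diagonal entries
#   dist = torch.distributions.MultivariateNormal(loc=mu, covariance_matrix=torch.diag_embed(sig))
#   standard = torch.distributions.MultivariateNormal(torch.zeros(64), torch.eye(64))
#   z = dist.sample()
#   log_prob = dist.log_prob(z)                                          # shape (1,)
#   kl = torch.distributions.kl_divergence(dist, standard)               # shape (1,)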
def sample_action(self, action_probabilities):
# Categorical distribution sampling.
sample_action = torch.distributions.Categorical(probs=action_probabilities).sample().squeeze(0)
return sample_action | CausalSkillLearning-main | DownstreamRL/PolicyNet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
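# Optional sanity check of the constants above (illustrative; not required by the generator): for
# every start corner, both valid options point toward the opposite corner, i.e. each allowed action
# moves along an axis in the direction of (goal - start).
for corner in range(4):
    goal_direction = -start_states[corner]
    for option in valid_options[corner]:
        assert (action_map[option] * np.sign(goal_direction)).sum() > 0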
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
# Select one of four starting points. (-2,-2), (-2,2), (2,-2), (2,2)
goal_array_dataset[i] = np.random.randint(0, high=4) # uniform over {0, 1, 2, 3}
x_array_dataset[i,0] = start_states[goal_array_dataset[i]]
goal = -start_states[goal_array_dataset[i]]
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# After 3 or 4 timesteps in the current option, terminate it with probability 1/3; after 5, always terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
axes = -goal/abs(goal)
step1 = 30*np.ones((2))-axes*np.abs(x_array_dataset[i,t]-x_array_dataset[i,0])
# baseline = t*20*np.sqrt(2)/20
baseline = t
step2 = step1-baseline
step3 = step2/step2.sum()
# The option is chosen uniformly from the two options valid for this goal (step1-step3 above are currently unused).
y_array_dataset[i,t] = np.random.choice(valid_options[goal_array_dataset[i][0]])
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
np.save("X_array_directed_continuous.npy",x_array_dataset)
np.save("Y_array_directed_continuous.npy",y_array_dataset)
np.save("B_array_directed_continuous.npy",b_array_dataset)
np.save("A_array_directed_continuous.npy",a_array_dataset) | CausalSkillLearning-main | DataGenerator/DirectedContinuousTrajs.py |