# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
from copy import deepcopy
import math
import torch.nn.functional as F
try:
from itertools import izip
except ImportError: # python3.x
izip = zip
__all__ = ['resnext50_32x4d', 'resnext101_32x4d']
class ResNeXtBottleneck(nn.Module):
expansion = 4
"""
    ResNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, inplanes, planes, cardinality, base_width=4, stride=1, downsample=None):
super(ResNeXtBottleneck, self).__init__()
D = int(math.floor(planes * (base_width / 64.0)))
C = cardinality
self.conv_reduce = nn.Conv2d(inplanes, D * C, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D * C)
self.conv_conv = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=cardinality,
bias=False)
self.bn = nn.BatchNorm2d(D * C)
self.conv_expand = nn.Conv2d(D * C, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
if downsample is not None:
self.downsample = downsample
def forward(self, x):
residual = x
bottleneck = self.conv_reduce(x)
bottleneck = self.relu(self.bn_reduce(bottleneck))
bottleneck = self.conv_conv(bottleneck)
bottleneck = self.relu(self.bn(bottleneck))
bottleneck = self.conv_expand(bottleneck)
bottleneck = self.bn_expand(bottleneck)
if self.downsample is not None:
residual = self.downsample(x)
return self.relu(residual + bottleneck)
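# Minimal shape-check sketch (illustration only; assumes the default 32x4d settings):
# with planes=64, base_width=4 and cardinality=32, the grouped conv runs at D*C = 4*32 = 128
# channels and the block outputs planes*4 = 256 channels.
def _example_bottleneck_shapes():
    import torch  # the module itself only imports torch.nn / torch.nn.functional
    block = ResNeXtBottleneck(inplanes=64, planes=64, cardinality=32, base_width=4,
                              downsample=nn.Sequential(nn.Conv2d(64, 256, kernel_size=1, bias=False),
                                                       nn.BatchNorm2d(256)))
    out = block(torch.randn(1, 64, 56, 56))
    assert out.shape == (1, 256, 56, 56)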
class ResNeXt_C5(nn.Module):
def __init__(self, block, layers, cardinality=32, base_width=4):
self.inplanes = 64
super(ResNeXt_C5, self).__init__()
self.cardinality = cardinality
self.base_width = base_width
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.classifier = nn.Linear(2048, 10000, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride=stride, downsample=downsample, cardinality=self.cardinality,
base_width=self.base_width))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, cardinality=self.cardinality, base_width=self.base_width))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def load_pretrained(self, base_network):
modules_new = [x for x in self.modules() if (isinstance(x, nn.Conv2d) or isinstance(x, nn.BatchNorm2d)
or isinstance(x, nn.Linear))]
modules_ori = [x for x in base_network.modules() if (isinstance(x, nn.Conv2d) or isinstance(x, nn.BatchNorm2d)
or isinstance(x, nn.Linear))]
assert(len(modules_ori) == len(modules_new))
for module, module_ori in izip(modules_new, modules_ori):
if isinstance(module, nn.Conv2d) and isinstance(module_ori, nn.Conv2d):
if module.weight.data.shape == module_ori.weight.data.shape:
module.weight.data = deepcopy(module_ori.weight.data)
if module_ori.bias is not None:
module.bias.data = deepcopy(module_ori.bias.data)
else:
print('This should not happen. Skipping Conv layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, nn.BatchNorm2d) and isinstance(module_ori, nn.BatchNorm2d):
if module.weight.data.shape == module_ori.weight.data.shape:
# Copy running mean and variance of batchnorm layers!
module.running_mean.data = deepcopy(module_ori.running_mean.data)
module.running_var.data = deepcopy(module_ori.running_var.data)
module.weight.data = deepcopy(module_ori.weight.data)
module.bias.data = deepcopy(module_ori.bias.data)
else:
print('Skipping Batchnorm layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, nn.Linear) and isinstance(module_ori, nn.Linear):
module.weight.data = deepcopy(module_ori.weight.data)
module.bias.data = deepcopy(module_ori.bias.data)
def resnext50_32x4d(pretrained=False):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNeXt_C5(ResNeXtBottleneck, [3, 4, 6, 3], cardinality=32, base_width=4)
if pretrained:
from fblib.networks.torch2pytorch import resnext_50_32x4d
model_full = resnext_50_32x4d.resnext_50_32x4d()
model.load_pretrained(model_full)
return model
def resnext101_32x4d(pretrained=False):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNeXt_C5(ResNeXtBottleneck, [3, 4, 23, 3], cardinality=32, base_width=4)
if pretrained:
from fblib.networks.torch2pytorch import resnext_101_32x4d
model_full = resnext_101_32x4d.resnext_101_32x4d()
model.load_pretrained(model_full)
return model
def test_reproduce():
    import os
    import pickle
    import cv2
    import numpy as np
    import urllib.request
    import torch
    from fblib import PROJECT_ROOT_DIR
classes = pickle.load(urllib.request.urlopen(
'https://gist.githubusercontent.com/yrevar/6135f1bd8dcf2e0cc683/raw/d133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'
'/imagenet1000_clsid_to_human.pkl'))
model = resnext101_32x4d(pretrained=True)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img = cv2.imread(os.path.join(PROJECT_ROOT_DIR, 'util/img/cat.jpg')).astype(np.float32) / 255.
img = cv2.resize(img, dsize=(224, 224))
img = (img - mean) / std
img = img[:, :, :, np.newaxis]
img = img.transpose((3, 2, 0, 1))
img = torch.from_numpy(img.astype(np.float32))
model = model.eval()
with torch.no_grad():
output = model(img)
output = torch.nn.functional.softmax(output, dim=1)
    print('Class id: {}, class name: {}, probability: {:.2f}'.format(
        output.argmax().item(), classes[output.argmax().item()], output.max().item()))
def test_gflops():
from fblib.util.model_resources.flops import compute_gflops
from torchvision.models import resnet
    x101 = resnext101_32x4d(pretrained=False)
    print('GFLOPS for ResNeXt-101: {}'.format(compute_gflops(x101)))
    res101 = resnet.resnet101(pretrained=False)
    print('GFLOPS for ResNet-101: {}'.format(compute_gflops(res101)))
if __name__ == '__main__':
import os
import torch
import cv2
import numpy as np
import urllib.request
from fblib import PROJECT_ROOT_DIR
test_gflops()
test_reproduce()
# ==== astmt-master | fblib/networks/classification/resnext.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from fblib.util.mypath import Path
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet26', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet26': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/resnet26-c36cf79a7.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def norm2d(num_channels, group_norm=False, num_groups=32, num_channels_per_group=16):
if group_norm:
if num_groups is not None:
num_channels_per_group = num_channels / num_groups
else:
num_groups = num_channels / num_channels_per_group
print("Using groupnorm with num_channels: {}, num_groups: {}. and num_channels_per_group: {}".format(
num_channels, num_groups, num_channels_per_group))
return nn.GroupNorm(num_channels=num_channels, num_groups=int(num_groups), affine=True)
else:
return nn.BatchNorm2d(num_channels)
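# Quick illustration of the helper above: norm2d(256, group_norm=True) keeps the default
# num_groups=32 (8 channels per group) and returns nn.GroupNorm(32, 256), while a plain
# norm2d(256) call simply returns nn.BatchNorm2d(256).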
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, group_norm=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm2d(planes, group_norm)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm2d(planes, group_norm)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, group_norm=False):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = norm2d(planes, group_norm)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = norm2d(planes, group_norm)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm2d(planes * 4, group_norm)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, group_norm=False):
self.group_norm = group_norm
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm2d(64, self.group_norm)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
norm2d(planes * block.expansion, group_norm=self.group_norm),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, group_norm=self.group_norm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, group_norm=self.group_norm))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ResNetFeatures(ResNet):
def __init__(self, block, layers, num_classes=1000, group_norm=False):
super(ResNetFeatures, self).__init__(block, layers, num_classes, group_norm)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
features = x
x = self.fc(x)
return x, features
def resnet18(pretrained=False, features=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if not features:
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
else:
model = ResNetFeatures(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet26(pretrained=False, features=False, remote=True, **kwargs):
"""Constructs a ResNet-26 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if not features:
model = ResNet(Bottleneck, [2, 2, 2, 2], **kwargs)
else:
model = ResNetFeatures(Bottleneck, [2, 2, 2, 2], **kwargs)
if pretrained:
print('Loading resnet26 Imagenet')
# Load checkpoint
if remote:
checkpoint = load_state_dict_from_url(model_urls['resnet26'], map_location='cpu', progress=True)
else:
checkpoint = torch.load(
os.path.join(Path.models_dir(), 'resnet26.pth'), map_location=lambda storage, loc: storage)
checkpoint = checkpoint['model_state']
# Handle DataParallel
if 'module.' in list(checkpoint.keys())[0]:
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k.replace('module.', '') # remove `module.`
new_state_dict[name] = v
else:
new_state_dict = checkpoint
# Load pre-trained IN model
model.load_state_dict(new_state_dict)
return model
def resnet34(pretrained=False, features=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if not features:
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
else:
model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, features=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if not features:
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
else:
model = ResNetFeatures(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, features=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if not features:
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
else:
model = ResNetFeatures(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, features=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if not features:
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
else:
model = ResNetFeatures(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
def test_reproduce():
import os
import torch
import pickle
import cv2
import numpy as np
import urllib.request
from fblib import PROJECT_ROOT_DIR
classes = pickle.load(urllib.request.urlopen(
'https://gist.githubusercontent.com/yrevar/6135f1bd8dcf2e0cc683/raw/d133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'
'/imagenet1000_clsid_to_human.pkl'))
model = resnet26(pretrained=True, features=False)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img = cv2.imread(os.path.join(PROJECT_ROOT_DIR, 'util/img/dog.jpg')).astype(np.float32) / 255.
img = cv2.resize(img, dsize=(224, 224))
img = (img - mean) / std
img = img[:, :, :, np.newaxis]
img = img.transpose((3, 2, 0, 1))
img = torch.from_numpy(img.astype(np.float32))
model = model.eval()
with torch.no_grad():
output = model(img)
output = torch.nn.functional.softmax(output, dim=1)
    print('Class id: {}, class name: {}, probability: {:.2f}'.format(
        output.argmax().item(), classes[output.argmax().item()], output.max().item()))
if __name__ == '__main__':
test_reproduce()
# ==== astmt-master | fblib/networks/classification/resnet.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import math
import torch
import torch.nn as nn
from collections import OrderedDict
from fblib.util.mypath import Path
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
model_urls = {
'se_resnet18': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/se_resnet18-23d68cfd8.pth',
'se_resnet26': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/se_resnet26-5eb336d20.pth',
'se_resnet50': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/se_resnet50-ad8889f9f.pth',
'se_resnet101': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/se_resnet101-8dbb64f8e.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
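# Rough usage sketch for SELayer (illustration only): the channel descriptors from global
# average pooling are squeezed through the reduction MLP and used to rescale the input
# feature map, so the output shape always matches the input shape.
def _example_se_layer():
    layer = SELayer(channel=64, reduction=16)
    x = torch.randn(2, 64, 32, 32)
    assert layer(x).shape == x.shape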
class CBAMLayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CBAMLayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
)
self.assemble = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3)
def forward(self, x):
x = self._forward_se(x)
x = self._forward_spatial(x)
return x
def _forward_se(self, x):
# Channel attention module (SE with max-pool and average-pool)
b, c, _, _ = x.size()
x_avg = self.fc(self.avg_pool(x).view(b, c)).view(b, c, 1, 1)
x_max = self.fc(self.max_pool(x).view(b, c)).view(b, c, 1, 1)
y = torch.sigmoid(x_avg + x_max)
return x * y
def _forward_spatial(self, x):
# Spatial attention module
x_avg = torch.mean(x, 1, True)
x_max, _ = torch.max(x, 1, True)
y = torch.cat((x_avg, x_max), 1)
y = torch.sigmoid(self.assemble(y))
return x * y
class SEBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16, attention='se'):
super(SEBasicBlock, self).__init__()
if attention == 'se':
attention_layer = SELayer
elif attention == 'cbam':
attention_layer = CBAMLayer
else:
raise NotImplementedError
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1)
self.bn2 = nn.BatchNorm2d(planes)
self.se = attention_layer(planes, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16, attention='se'):
super(SEBottleneck, self).__init__()
if attention == 'se':
attention_layer = SELayer
elif attention == 'cbam':
attention_layer = CBAMLayer
else:
raise NotImplementedError
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se = attention_layer(planes * 4, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, attention='se'):
self.inplanes = 64
super(ResNet, self).__init__()
self.attention = attention
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, attention=self.attention))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, attention=self.attention))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ResNetFeatures(ResNet):
def __init__(self, block, layers, num_classes=1000, attention='se'):
print('Initializing ResNet with Feature output')
super(ResNetFeatures, self).__init__(block, layers, num_classes, attention)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
features = x
x = self.fc(x)
return x, features
def get_state_dict_se(model_name, remote=True):
# Load checkpoint
if remote:
checkpoint = load_state_dict_from_url(model_urls[model_name], map_location='cpu', progress=True)
else:
checkpoint = torch.load(
os.path.join(Path.models_dir(), model_name + '.pth'), map_location=lambda storage, loc: storage)
checkpoint = checkpoint['model_state']
# Handle DataParallel
if 'module.' in list(checkpoint.keys())[0]:
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k.replace('module.', '') # remove `module.`
new_state_dict[name] = v
else:
new_state_dict = checkpoint
return new_state_dict
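# Illustration of the 'module.' handling above: checkpoints saved from a
# torch.nn.DataParallel-wrapped model prefix every parameter key with 'module.', e.g.
# 'module.conv1.weight' -> 'conv1.weight' after stripping, so they can be loaded into a
# plain (unwrapped) model with load_state_dict.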
def se_resnet18(num_classes=1000, pretrained=False, features=False, attention='se'):
"""Constructs a ResNet-18 model.
Args:
num_classes: number of output classes
pretrained (bool): If True, returns a model pre-trained on ImageNet
attention: 'se' or 'cbam'
"""
if not features:
model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes, attention=attention)
else:
model = ResNetFeatures(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes, attention=attention)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if pretrained:
print('Loading se_resnet18 Imagenet')
new_state_dict = get_state_dict_se(attention + '_resnet18')
# Load pre-trained IN model
model.load_state_dict(new_state_dict)
return model
def se_resnet26(num_classes=1000, pretrained=False, features=False, attention='se'):
"""Constructs a ResNet-26 model.
Args:
num_classes: number of output classes
pretrained (bool): If True, returns a model pre-trained on ImageNet
attention: 'se' or 'cbam'
"""
if not features:
model = ResNet(SEBottleneck, [2, 2, 2, 2], num_classes=num_classes, attention=attention)
else:
model = ResNetFeatures(SEBottleneck, [2, 2, 2, 2], num_classes=num_classes, attention=attention)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if pretrained:
print('Loading se_resnet26 Imagenet')
new_state_dict = get_state_dict_se(attention + '_resnet26')
# Load pre-trained IN model
model.load_state_dict(new_state_dict)
return model
def se_resnet50(num_classes=1000, pretrained=False, features=False, attention='se'):
"""Constructs a ResNet-50 model.
Args:
num_classes: number of output classes
pretrained (bool): If True, returns a model pre-trained on ImageNet
attention: 'se' or 'cbam'
"""
if not features:
model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes, attention=attention)
else:
model = ResNetFeatures(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes, attention=attention)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if pretrained:
print('Loading se_resnet50 Imagenet')
new_state_dict = get_state_dict_se(attention + '_resnet50')
# Load pre-trained IN model
model.load_state_dict(new_state_dict)
return model
def se_resnet101(num_classes=1000, pretrained=False, features=False, attention='se'):
"""Constructs a ResNet-101 model.
Args:
num_classes: number of output classes
pretrained (bool): If True, returns a model pre-trained on ImageNet
attention: 'se' or 'cbam'
"""
if not features:
model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes, attention=attention)
else:
model = ResNetFeatures(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes, attention=attention)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if pretrained:
print('Loading se_resnet101 Imagenet')
new_state_dict = get_state_dict_se(attention + '_resnet101')
# Load pre-trained IN model
model.load_state_dict(new_state_dict)
return model
def test_visualize_graph():
import fblib.util.visualize as viz
net = se_resnet26(pretrained=False, attention='se')
net.eval()
x = torch.randn(2, 3, 224, 224)
x.requires_grad_()
y = net(x)
# pdf visualizer
g = viz.make_dot(y, net.state_dict())
g.view(directory='./')
def test_reproduce():
import os
import torch
import pickle
import cv2
import numpy as np
import urllib.request
from fblib import PROJECT_ROOT_DIR
classes = pickle.load(urllib.request.urlopen(
'https://gist.githubusercontent.com/yrevar/6135f1bd8dcf2e0cc683/raw/d133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'
'/imagenet1000_clsid_to_human.pkl'))
model = se_resnet26(pretrained=True, attention='se')
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img = cv2.imread(os.path.join(PROJECT_ROOT_DIR, 'util/img/cat.jpg')).astype(np.float32) / 255.
img = cv2.resize(img, dsize=(224, 224))
img = (img - mean) / std
img = img[:, :, :, np.newaxis]
img = img.transpose((3, 2, 0, 1))
img = torch.from_numpy(img.astype(np.float32))
model = model.eval()
with torch.no_grad():
output = model(img)
output = torch.nn.functional.softmax(output, dim=1)
    print('Class id: {}, class name: {}, probability: {:.2f}'.format(
        output.argmax().item(), classes[output.argmax().item()], output.max().item()))
if __name__ == '__main__':
test_visualize_graph()
# ==== astmt-master | fblib/networks/classification/se_resnet.py ====


# ==== astmt-master | fblib/networks/torch2pytorch/__init__.py ==== (empty file)
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
import torch.nn as nn
from functools import reduce
from fblib.util.mypath import Path
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func, self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func, self.forward_prepare(input))
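# Sketch of how these Lambda wrappers emulate the original Torch7 containers (illustration
# only, using toy modules): LambdaMap plays the role of ConcatTable (run every child on the
# same input) and LambdaReduce plays the role of CAddTable (fold the branch outputs together).
def _example_lambda_residual():
    branch = nn.Sequential(nn.Conv2d(8, 8, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(8))
    block = nn.Sequential(
        LambdaMap(lambda x: x, branch, Lambda(lambda x: x)),  # two parallel paths: conv branch + identity
        LambdaReduce(lambda x, y: x + y),                     # element-wise sum of the two paths
        nn.ReLU(),
    )
    out = block(torch.randn(1, 8, 16, 16))
    assert out.shape == (1, 8, 16, 16)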
resnext_101_32x4d_model = nn.Sequential( # Sequential,
nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3), (2, 2), (1, 1)),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(64, 128, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
nn.Sequential( # Sequential,
nn.Conv2d(64, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256, 128, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256, 128, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (2, 2), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
nn.Sequential( # Sequential,
nn.Conv2d(256, 512, (1, 1), (2, 2), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
nn.Sequential( # Sequential,
nn.Conv2d(512, 1024, (1, 1), (2, 2), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (2, 2), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
nn.Sequential( # Sequential,
nn.Conv2d(1024, 2048, (1, 1), (2, 2), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
nn.AvgPool2d((7, 7), (1, 1)),
Lambda(lambda x: x.view(x.size(0), -1)), # View,
nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(2048, 1000)), # Linear,
)
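# Note: the nn.Sequential above encodes ResNeXt-101 (32x4d); its four inner stage groups
# contain 3, 4, 23 and 3 residual blocks respectively, mirroring the [3, 4, 23, 3]
# configuration used by the ResNeXt_C5 builder in fblib/networks/classification/resnext.py.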
def resnext_101_32x4d():
model = resnext_101_32x4d_model
model.load_state_dict(torch.load(os.path.join(Path.models_dir(), 'resnext_101_32x4d.pth')))
    return model


# ==== astmt-master | fblib/networks/torch2pytorch/resnext_101_32x4d.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
import torch.nn as nn
from functools import reduce
from fblib.util.mypath import Path
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func, self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func, self.forward_prepare(input))
resnext_50_32x4d_model = nn.Sequential( # Sequential,
nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3), 1, 1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3), (2, 2), (1, 1)),
# Layer 1
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(64, 128, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
nn.Sequential( # Sequential,
nn.Conv2d(64, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256, 128, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256, 128, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(128, 128, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
),
nn.Conv2d(128, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
# Layer 2
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (2, 2), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
nn.Sequential( # Sequential,
nn.Conv2d(256, 512, (1, 1), (2, 2), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 256, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256, 256, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
# Layer 3
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (2, 2), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
nn.Sequential( # Sequential,
nn.Conv2d(512, 1024, (1, 1), (2, 2), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 512, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512, 512, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
# Layer 4
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (2, 2), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
nn.Sequential( # Sequential,
nn.Conv2d(1024, 2048, (1, 1), (2, 2), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048, 1024, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024, 1024, (3, 3), (1, 1), (1, 1), 1, 32, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024, 2048, (1, 1), (1, 1), (0, 0), 1, 1, bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x, y: x + y), # CAddTable,
nn.ReLU(),
),
),
nn.AvgPool2d((7, 7), (1, 1)),
Lambda(lambda x: x.view(x.size(0), -1)), # View,
nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(2048, 1000)), # Linear,
)
def resnext_50_32x4d():
model = resnext_50_32x4d_model
model.load_state_dict(torch.load(os.path.join(Path.models_dir(), 'resnext_50_32x4d.pth'), map_location=lambda storage, loc: storage))
return model
if __name__ == '__main__':
model = resnext_50_32x4d()
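# Editor's note (added, not part of the original file): a minimal smoke-test
# sketch, assuming the imports at the top of this file. The converted
# ResNeXt-50 (32x4d) graph expects standard 224x224 ImageNet crops:
#   model.eval()
#   with torch.no_grad():
#       out = model(torch.randn(1, 3, 224, 224))
#   print(out.shape)  # expected: torch.Size([1, 1000])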
| astmt-master | fblib/networks/torch2pytorch/resnext_50_32x4d.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import functional as F
import torchvision.models.resnet as resnet
from fblib.networks.deeplab_single_task.classifiers import PSPModule, AtrousPyramidModule, AtrousSpatialPyramidPoolingModule
from fblib.layers.misc_layers import center_crop, interp_surgery
from fblib.networks.deeplab_single_task import resnet as custom_resnet
from fblib.util.mypath import Path
try:
from itertools import izip
except ImportError: # python3.x
izip = zip
affine_par = True # Trainable Batchnorm for the classifier
def get_ngroups_gn(dim):
"""
Get number of groups used by groupnorm, based on number of channels
"""
n_lay_per_group_low = 16
n_lay_per_group = 32
if dim <= 256:
assert(dim % n_lay_per_group_low == 0)
return int(dim / n_lay_per_group_low)
else:
assert(dim % n_lay_per_group == 0)
return int(dim / n_lay_per_group)
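# Illustrative examples (added comment): with the thresholds above,
#   get_ngroups_gn(256) -> 256 / 16 = 16 groups
#   get_ngroups_gn(512) -> 512 / 32 = 16 groups
# i.e. roughly 16 groups regardless of the channel count.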
def outS(i):
i = int(i)
i = int((i + 1) / 2)
i = int(np.ceil((i + 1) / 2.0))
i = int((i + 1) / 2)
return i
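# Illustrative example (added comment): outS mimics the stride-8 output grid of
# the original DeepLab, e.g. outS(321) == 41.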
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, train_norm_layers=False,
sync_bnorm=False):
super(BasicBlock, self).__init__()
self.bnorm = nn.BatchNorm2d
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = self.bnorm(planes)
for i in self.bn1.parameters():
i.requires_grad = train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, dilation=dilation)
self.bn2 = self.bnorm(planes)
for i in self.bn2.parameters():
i.requires_grad = train_norm_layers
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, train_norm_layers=False,
sync_bnorm=False):
super(Bottleneck, self).__init__()
padding = dilation
self.bnorm = nn.BatchNorm2d
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
self.bn1 = self.bnorm(planes, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = train_norm_layers
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
padding=padding, bias=False, dilation=dilation)
self.bn2 = self.bnorm(planes, affine=affine_par)
for i in self.bn2.parameters():
i.requires_grad = train_norm_layers
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = self.bnorm(planes * 4, affine=affine_par)
for i in self.bn3.parameters():
i.requires_grad = train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, n_classes, nInputChannels=3, classifier="atrous",
output_stride=16, decoder=True,
static_graph=False, deconv_upsample=False, groupnorm=False, train_norm_layers=False,
sync_bnorm=False):
super(ResNet, self).__init__()
print("Constructing ResNet model...")
print("Output stride: {}".format(output_stride))
print("Number of classes: {}".format(n_classes))
print("Number of Input Channels: {}".format(nInputChannels))
v3_atrous_rates = [6, 12, 18]
if output_stride == 8:
dilations = (2, 4)
strides = (2, 2, 2, 1, 1)
v3_atrous_rates = [x * 2 for x in v3_atrous_rates]
elif output_stride == 16:
dilations = (1, 2)
strides = (2, 2, 2, 2, 1)
else:
raise ValueError('Choose between output_stride 8 and 16')
self.inplanes = 64
self.classifier = classifier
self.decoder = decoder
self.deconv_upsample = deconv_upsample
self.groupnorm = groupnorm
self.train_norm_layers = train_norm_layers
self.sync_bnorm = sync_bnorm
self.bnorm = nn.BatchNorm2d
self.conv1 = nn.Conv2d(nInputChannels, 64, kernel_size=7, stride=strides[0], padding=3,
bias=False)
self.bn1 = self.bnorm(64, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = self.train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=strides[1], padding=1, ceil_mode=False)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[2])
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[3], dilation=dilations[0])
self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[4], dilation=dilations[1])
if block == BasicBlock:
in_f, out_f = 512, 128
else:
in_f, out_f = 2048, 512
if decoder:
print('Using decoder')
if classifier == "atrous":
print('Initializing classifier: old atrous pyramid')
out_f_classifier = 256
self.layer5 = AtrousPyramidModule(dilation_series=[6, 12, 18, 24], padding_series=[6, 12, 18, 24],
n_classes=out_f_classifier, in_f=in_f)
elif classifier == "psp":
print('Initializing classifier: PSP')
out_f_classifier = 256
self.layer5 = PSPModule(in_features=in_f, out_features=out_f_classifier, sizes=(1, 2, 3, 6),
n_classes=0, groupnorm=self.groupnorm, sync_bnorm=self.sync_bnorm)
elif classifier == 'atrous-v3':
print('Initializing classifier: A-trous with global features (Deeplab-v3+)')
out_f_classifier = 256
self.layer5 = AtrousSpatialPyramidPoolingModule(depth=out_f_classifier,
groupnorm=self.groupnorm,
dilation_series=v3_atrous_rates,
sync_bnorm=self.sync_bnorm)
else:
out_f_classifier = out_f
self.layer5 = None
if not self.groupnorm:
NormModule = self.bnorm
kwargs_low = {"num_features": 48, "affine": affine_par}
kwargs_out = {"num_features": 256, "affine": affine_par}
else:
NormModule = nn.GroupNorm
kwargs_low = {"num_groups": get_ngroups_gn(48), "num_channels": 48, "affine": affine_par}
kwargs_out = {"num_groups": get_ngroups_gn(256), "num_channels": 256, "affine": affine_par}
self.low_level_reduce = nn.Sequential(
nn.Conv2d(256, 48, kernel_size=1, bias=False),
NormModule(**kwargs_low),
nn.ReLU(inplace=True)
)
self.concat_and_predict = nn.Sequential(
conv3x3(out_f_classifier + 48, 256),
NormModule(**kwargs_out),
nn.ReLU(inplace=True),
conv3x3(256, 256),
NormModule(**kwargs_out),
nn.ReLU(inplace=True),
# final layer
nn.Conv2d(256, n_classes, kernel_size=1, bias=True)
)
if self.deconv_upsample:
print("Using upsampling with deconvolutions")
up_factor = 2
self.upscale_1 = nn.ConvTranspose2d(out_f_classifier, out_f_classifier,
kernel_size=up_factor * 2, stride=up_factor, bias=False)
else:
if classifier == "atrous":
print('Initializing classifier: A-trous pyramid')
self.layer5 = AtrousPyramidModule(dilation_series=[6, 12, 18, 24], padding_series=[6, 12, 18, 24],
n_classes=n_classes, in_f=in_f)
elif classifier == "psp":
print('Initializing classifier: PSP')
self.layer5 = PSPModule(in_features=in_f, out_features=out_f, sizes=(1, 2, 3, 6), n_classes=n_classes,
static_graph=static_graph, groupnorm=self.groupnorm, sync_bnorm=self.sync_bnorm)
elif classifier == "atrous-v3":
print('Initializing classifier: A-trous with global features (Deeplab-v3+)')
self.layer5 = AtrousSpatialPyramidPoolingModule(depth=n_classes, in_f=in_f,
groupnorm=self.groupnorm,
dilation_series=v3_atrous_rates,
sync_bnorm=self.sync_bnorm)
else:
self.layer5 = None
# Initialize weights
self._initialize_weights()
# Check if batchnorm parameters are trainable
self._verify_bnorm_params()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.bnorm(planes * block.expansion, affine=affine_par),
)
# Train batchnorm?
for i in downsample._modules['1'].parameters():
i.requires_grad = self.train_norm_layers
layers = []
layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample,
train_norm_layers=self.train_norm_layers, sync_bnorm=self.sync_bnorm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation,
train_norm_layers=self.train_norm_layers, sync_bnorm=self.sync_bnorm))
return nn.Sequential(*layers)
def _verify_bnorm_params(self):
verify_trainable = True
a = 0
for x in self.modules():
if isinstance(x, nn.BatchNorm2d):
for y in x.parameters():
verify_trainable = (verify_trainable and y.requires_grad)
a += isinstance(x, nn.BatchNorm2d)
print("\nVerification: Trainable batchnorm parameters? Answer: {}\n".format(verify_trainable))
print("Asynchronous bnorm layers: {}".format(a))
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, self.bnorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
m.weight.data.zero_()
m.weight.data = interp_surgery(m)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def forward(self, x, bbox=None):
h, w = x.shape[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if self.decoder:
x_low = x
low_h, low_w = int(x_low.size()[-2]), int(x_low.size()[-1])
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.layer5 is not None:
x = self.layer5(x)
if self.decoder:
if self.deconv_upsample:
x = center_crop(self.upscale_1(x), low_h, low_w)
else:
x = F.interpolate(x, size=(x_low.shape[2], x_low.shape[3]),
mode='bilinear', align_corners=False)
x_low = self.low_level_reduce(x_low)
x = torch.cat([x, x_low], dim=1)
x = self.concat_and_predict(x)
x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=False)
return x
def load_pretrained(self, base_network, nInputChannels=3):
flag = 0
i = 0
for module, module_ori in izip(self.modules(), base_network.modules()):
if isinstance(module, nn.Conv2d) and isinstance(module_ori, nn.Conv2d):
if not flag and nInputChannels != 3:
module.weight[:, :3, :, :].data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
for i in range(3, int(module.weight.data.shape[1])):
module.weight[:, i, :, :].data = deepcopy(module_ori.weight[:, -1, :, :][:, np.newaxis, :, :].data)
flag = 1
i += 1
elif module.weight.data.shape == module_ori.weight.data.shape:
i += 1
module.weight.data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
else:
print('Skipping Conv layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, self.bnorm) and \
(isinstance(module_ori, nn.BatchNorm2d) or isinstance(module_ori, self.bnorm)):
if module.weight.data.shape == module_ori.weight.data.shape:
i += 1
# Copy running mean and variance of batchnorm layers!
module.running_mean.data = deepcopy(module_ori.running_mean.data)
module.running_var.data = deepcopy(module_ori.running_var.data)
module.weight.data = deepcopy(module_ori.weight.data)
module.bias.data = deepcopy(module_ori.bias.data)
else:
print('Skipping Batchnorm layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
print("Content of {} layers successfully copied.".format(i))
def load_pretrained_ms(self, base_network, nInputChannels=3):
flag = 0
for module, module_ori in izip(self.modules(), base_network.Scale.modules()):
if isinstance(module, nn.Conv2d) and isinstance(module_ori, nn.Conv2d):
if not flag and nInputChannels != 3:
module.weight[:, :3, :, :].data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
for i in range(3, int(module.weight.data.shape[1])):
module.weight[:, i, :, :].data = deepcopy(module_ori.weight[:, -1, :, :][:, np.newaxis, :, :].data)
flag = 1
elif module.weight.data.shape == module_ori.weight.data.shape:
module.weight.data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
else:
print('Skipping Conv layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, self.bnorm) and (isinstance(module_ori, nn.BatchNorm2d) or isinstance(module_ori, self.bnorm)) \
and module.weight.data.shape == module_ori.weight.data.shape:
# Copy running mean and variance of batchnorm layers!
module.running_mean.data = deepcopy(module_ori.running_mean.data)
module.running_var.data = deepcopy(module_ori.running_var.data)
module.weight.data = deepcopy(module_ori.weight.data)
module.bias.data = deepcopy(module_ori.bias.data)
class MS_Deeplab(nn.Module):
def __init__(self, block, NoLabels, nInputChannels=3):
super(MS_Deeplab, self).__init__()
self.Scale = ResNet(block, [3, 4, 23, 3], NoLabels, nInputChannels=nInputChannels)
def forward(self, x):
input_size = x.size()[2]
self.interp1 = nn.Upsample(size=(int(input_size*0.75)+1, int(input_size*0.75)+1), mode='bilinear', align_corners=False)
self.interp2 = nn.Upsample(size=(int(input_size*0.5)+1, int(input_size*0.5)+1), mode='bilinear', align_corners=False)
self.interp3 = nn.Upsample(size=(outS(input_size), outS(input_size)), mode='bilinear', align_corners=False)
out = []
x2 = self.interp1(x)
x3 = self.interp2(x)
out.append(self.Scale(x)) # for original scale
out.append(self.interp3(self.Scale(x2))) # for 0.75x scale
out.append(self.Scale(x3)) # for 0.5x scale
x2Out_interp = out[1]
x3Out_interp = self.interp3(out[2])
temp1 = torch.max(out[0], x2Out_interp)
out.append(torch.max(temp1, x3Out_interp))
return out[-1]
def Res_Deeplab(n_classes=21, pretrained=None):
model = MS_Deeplab(Bottleneck, n_classes)
if pretrained is not None:
if pretrained == 'voc':
pth_model = 'MS_DeepLab_resnet_trained_VOC.pth'
elif pretrained == 'ms_coco':
pth_model = 'MS_DeepLab_resnet_pretrained_COCO_init.pth'
saved_state_dict = torch.load(os.path.join(Path.models_dir(), pth_model),
map_location=lambda storage, loc: storage)
if n_classes != 21:
for i in saved_state_dict:
i_parts = i.split('.')
if i_parts[1] == 'layer5':
saved_state_dict[i] = model.state_dict()[i]
model.load_state_dict(saved_state_dict)
return model
def get_lr_params(model):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4, model.layer5]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.low_level_reduce, model.concat_and_predict])
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_1x_lr_params(model):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.low_level_reduce])
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.layer5]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.concat_and_predict])
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
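# Usage sketch (added, not part of the original file): a typical pattern,
# suggested by the 1x/10x naming, is to train backbone and classifier with
# different learning rates, where `net` and `base_lr` come from the training
# script:
#   optimizer = torch.optim.SGD([
#       {'params': get_1x_lr_params(net), 'lr': base_lr},
#       {'params': get_10x_lr_params(net), 'lr': 10 * base_lr},
#   ], momentum=0.9)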
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
return base_lr*((1-float(iter_)/max_iter)**power)
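# Illustrative example (added comment): with the defaults,
#   lr_poly(1e-3, iter_=50) = 1e-3 * (1 - 50/100) ** 0.9 ~= 5.36e-4,
# i.e. the "poly" schedule decays the base learning rate to zero at max_iter.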
def resnet18(n_classes, pretrained=None, nInputChannels=3, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], n_classes, nInputChannels=nInputChannels, **kwargs)
if pretrained:
model_full = resnet.resnet18(pretrained=True)  # fetch torchvision weights only when needed
model.load_pretrained(model_full, nInputChannels=nInputChannels)
return model
def resnet26(n_classes, pretrained='scratch', nInputChannels=3, **kwargs):
"""Constructs a ResNet-26 model.
Args:
pretrained ('imagenet', 'scratch'): If 'imagenet', returns a model pre-trained on ImageNet
"""
print('Constructing ResNet26')
model = ResNet(Bottleneck, [2, 2, 2, 2], n_classes, nInputChannels=nInputChannels, **kwargs)
if pretrained == 'imagenet':
print('Loading resnet 26 weights')
model_full = custom_resnet.resnet26(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif pretrained == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Please specify scratch or imagenet for pre-training')
return model
def resnet50(n_classes, pretrained=None, nInputChannels=3, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], n_classes, nInputChannels=nInputChannels, **kwargs)
if pretrained is not None:
model_full = resnet.resnet50(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
return model
def resnet101(n_classes, pretrained='scratch', nInputChannels=3, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained ('imagenet', 'voc', 'ms_coco'): Select a model trained on the respective dataset.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], n_classes, nInputChannels=nInputChannels, **kwargs)
if pretrained == 'imagenet':
print('Initializing from pre-trained ImageNet model..')
model_full = resnet.resnet101(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif pretrained == 'ms_coco' or pretrained == 'voc':
model_full = Res_Deeplab(n_classes, pretrained=pretrained)
model.load_pretrained_ms(model_full, nInputChannels=nInputChannels)
else:
print('Training from scratch')
return model
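# Usage sketch (added, not part of the original file), mirroring the settings
# used in test_flops() below but with the ResNet-101 backbone:
#   net = resnet101(n_classes=21, pretrained='imagenet', classifier='atrous-v3',
#                   output_stride=16, decoder=True)
#   out = net(torch.randn(2, 3, 512, 512))   # -> shape (2, 21, 512, 512)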
def test_flops():
from fblib.util.model_resources.flops import compute_gflops
net = resnet26(n_classes=21, pretrained='imagenet', classifier="atrous-v3",
output_stride=16, decoder=True)
print('GFLOPS: {}'.format(compute_gflops(net, (2, 3, 256, 256))))
if __name__ == '__main__':
test_flops()
| astmt-master | fblib/networks/deeplab_single_task/deeplab_resnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import functional as F
import fblib.networks.classification.se_resnet as se_resnet_imagenet
from fblib.networks.deeplab_single_task.classifiers import AtrousSpatialPyramidPoolingModule
from fblib.layers.misc_layers import interp_surgery
try:
from itertools import izip
except ImportError: # python3.x
izip = zip
affine_par = True # Trainable Batchnorm for the classifier
def outS(i):
i = int(i)
i = int((i + 1) / 2)
i = int(np.ceil((i + 1) / 2.0))
i = int((i + 1) / 2)
return i
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
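# Shape walk-through (added comment): for an input of shape (B, C, H, W) the
# squeeze step pools to (B, C), the two FC layers produce per-channel gates in
# (0, 1) via the sigmoid, and the excitation step rescales the input
# channel-wise back to (B, C, H, W).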
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, train_norm_layers=False,
sync_bnorm=False, reduction=16):
super(SEBottleneck, self).__init__()
padding = dilation
self.bnorm = nn.BatchNorm2d
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = self.bnorm(planes)
for i in self.bn1.parameters():
i.requires_grad = train_norm_layers
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=padding, bias=False, dilation=dilation)
self.bn2 = self.bnorm(planes)
for i in self.bn2.parameters():
i.requires_grad = train_norm_layers
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = self.bnorm(planes * 4)
for i in self.bn3.parameters():
i.requires_grad = train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.se = SELayer(planes * 4, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEResNet(nn.Module):
def __init__(self, block, layers, n_classes, nInputChannels=3, classifier="atrous",
output_stride=16, decoder=True, train_norm_layers=False, sync_bnorm=False):
super(SEResNet, self).__init__()
print("Constructing Squeeeze and Excitation ResNet model...")
print("Output stride: {}".format(output_stride))
print("Number of classes: {}".format(n_classes))
v3_atrous_rates = [6, 12, 18]
if output_stride == 8:
dilations = (2, 4)
strides = (2, 2, 2, 1, 1)
v3_atrous_rates = [x * 2 for x in v3_atrous_rates]
elif output_stride == 16:
dilations = (1, 2)
strides = (2, 2, 2, 2, 1)
else:
raise ValueError('Choose between output_stride 8 and 16')
self.inplanes = 64
self.classifier = classifier
self.decoder = decoder
self.train_norm_layers = train_norm_layers
self.sync_bnorm = sync_bnorm
self.bnorm = nn.BatchNorm2d
self.conv1 = nn.Conv2d(nInputChannels, 64, kernel_size=7, stride=strides[0], padding=3,
bias=False)
self.bn1 = self.bnorm(64, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = self.train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=strides[1], padding=1, ceil_mode=False)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[2])
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[3], dilation=dilations[0])
self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[4], dilation=dilations[1])
in_f, out_f = 2048, 512
if decoder:
print('Using decoder')
if classifier == 'atrous-v3':
print('Initializing classifier: A-trous with global features (Deeplab-v3+)')
out_f_classifier = 256
self.layer5 = AtrousSpatialPyramidPoolingModule(depth=out_f_classifier,
dilation_series=v3_atrous_rates,
sync_bnorm=self.sync_bnorm)
else:
raise NotImplementedError('Select one of the available decoders')
NormModule = self.bnorm
kwargs_low = {"num_features": 48, "affine": affine_par}
kwargs_out = {"num_features": 256, "affine": affine_par}
self.low_level_reduce = nn.Sequential(
nn.Conv2d(256, 48, kernel_size=1, bias=False),
NormModule(**kwargs_low),
nn.ReLU(inplace=True)
)
self.concat_and_predict = nn.Sequential(
conv3x3(out_f_classifier + 48, 256),
NormModule(**kwargs_out),
nn.ReLU(inplace=True),
conv3x3(256, 256),
NormModule(**kwargs_out),
nn.ReLU(inplace=True),
# final layer
nn.Conv2d(256, n_classes, kernel_size=1, bias=True)
)
else:
if classifier == "atrous-v3":
print('Initializing classifier: A-trous with global features (Deeplab-v3+)')
self.layer5 = AtrousSpatialPyramidPoolingModule(depth=n_classes, in_f=in_f,
dilation_series=v3_atrous_rates,
sync_bnorm=self.sync_bnorm)
else:
self.layer5 = None
# Initialize weights
self._initialize_weights()
# Check if batchnorm parameters are trainable
self._verify_bnorm_params()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.bnorm(planes * block.expansion, affine=affine_par),
)
# Train batchnorm?
for i in downsample._modules['1'].parameters():
i.requires_grad = self.train_norm_layers
layers = [block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample,
train_norm_layers=self.train_norm_layers, sync_bnorm=self.sync_bnorm)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation,
train_norm_layers=self.train_norm_layers, sync_bnorm=self.sync_bnorm))
return nn.Sequential(*layers)
def _verify_bnorm_params(self):
verify_trainable = True
a = 0
for x in self.modules():
if isinstance(x, nn.BatchNorm2d):
for y in x.parameters():
verify_trainable = (verify_trainable and y.requires_grad)
a += isinstance(x, nn.BatchNorm2d)
print("\nVerification: Trainable batchnorm parameters? Answer: {}\n".format(verify_trainable))
print("Asynchronous bnorm layers: {}".format(a))
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, self.bnorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
m.weight.data.zero_()
m.weight.data = interp_surgery(m)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def forward(self, x, bbox=None):
h, w = x.shape[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if self.decoder:
x_low = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
if self.decoder:
x = F.interpolate(x, size=(x_low.shape[2], x_low.shape[3]),
mode='bilinear', align_corners=False)
x_low = self.low_level_reduce(x_low)
x = torch.cat([x, x_low], dim=1)
x = self.concat_and_predict(x)
x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=False)
return x
def load_pretrained(self, base_network, nInputChannels=3):
flag = 0
i = 0
for module, module_ori in izip(self.modules(), base_network.modules()):
if isinstance(module, nn.Conv2d) and isinstance(module_ori, nn.Conv2d):
if not flag and nInputChannels != 3:
module.weight[:, :3, :, :].data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
for i in range(3, int(module.weight.data.shape[1])):
module.weight[:, i, :, :].data = deepcopy(module_ori.weight[:, -1, :, :][:, np.newaxis, :, :].data)
flag = 1
i += 1
elif module.weight.data.shape == module_ori.weight.data.shape:
i += 1
module.weight.data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
else:
print('Skipping Conv layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, self.bnorm) and \
(isinstance(module_ori, nn.BatchNorm2d) or isinstance(module_ori, self.bnorm)):
if module.weight.data.shape == module_ori.weight.data.shape:
i += 1
# Copy running mean and variance of batchnorm layers!
module.running_mean.data = deepcopy(module_ori.running_mean.data)
module.running_var.data = deepcopy(module_ori.running_var.data)
module.weight.data = deepcopy(module_ori.weight.data)
module.bias.data = deepcopy(module_ori.bias.data)
else:
print('Skipping Batchnorm layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, nn.Linear) and isinstance(module_ori, nn.Linear):
module.weight.data = deepcopy(module_ori.weight.data)
module.bias.data = deepcopy(module_ori.bias.data)
i += 1
print("Content of {} layers successfully copied.".format(i))
def get_lr_params(model):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4, model.layer5]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.low_level_reduce, model.concat_and_predict])
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_1x_lr_params(model):
"""
This generator returns all the parameters of the net except for
the last classification layer. Note that for each batchnorm layer,
requires_grad is set to False in deeplab_resnet.py, therefore this function does not return
any batchnorm parameter
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.low_level_reduce])
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
This generator returns all the parameters for the last layer of the net,
which does the classification of pixel into classes
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.layer5]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.concat_and_predict])
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
return base_lr*((1-float(iter_)/max_iter)**power)
def se_resnet26(n_classes, pretrained=None, nInputChannels=3, **kwargs):
"""Constructs a ResNet-26 model.
Args:
pretrained ('imagenet', 'scratch'): If True, returns a model pre-trained on ImageNet
"""
model = SEResNet(SEBottleneck, [2, 2, 2, 2], n_classes, nInputChannels=nInputChannels, **kwargs)
if pretrained == 'imagenet':
model_full = se_resnet_imagenet.se_resnet26(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif pretrained == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Select imagenet or scratch for pre-training')
return model
def se_resnet50(n_classes, pretrained=None, nInputChannels=3, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained ('imagenet', 'scratch'): If True, returns a model pre-trained on ImageNet
"""
model = SEResNet(SEBottleneck, [3, 4, 6, 3], n_classes, nInputChannels=nInputChannels, **kwargs)
if pretrained == 'imagenet':
model_full = se_resnet_imagenet.se_resnet50(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif pretrained == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Select imagenet or scratch for pre-training')
return model
def se_resnet101(n_classes, pretrained='scratch', nInputChannels=3, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained ('imagenet', 'scratch'): Select model trained on respective dataset.
"""
model = SEResNet(SEBottleneck, [3, 4, 23, 3], n_classes, nInputChannels=nInputChannels, **kwargs)
if pretrained == 'imagenet':
model_full = se_resnet_imagenet.se_resnet101(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif pretrained == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Select imagenet or scratch for pre-training')
return model
def test_flops():
from fblib.util.model_resources.flops import compute_gflops
net = se_resnet50(n_classes=21, pretrained='imagenet', classifier="atrous-v3",
output_stride=16, decoder=True)
print('GFLOPS: {}'.format(compute_gflops(net, (2, 3, 256, 256))))
if __name__ == '__main__':
test_flops()
| astmt-master | fblib/networks/deeplab_single_task/deeplab_se_resnet.py |
| astmt-master | fblib/networks/deeplab_single_task/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import fblib.networks.classification.mobilenet_v2 as mobilenet_v2_imagenet
from fblib.util.mypath import Path
from collections import OrderedDict
def conv3x3_mnet(planes, stride=1, dilation=1):
"""3x3 depth-wiseconvolution with padding"""
return nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False,
groups=planes)
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class ASPPMnet(nn.Module):
"""
Atrous Spatial Pyramid Pooling Module (DeepLab-v3+) for mobilenet
"""
def __init__(self, dilation_series=None, out_f=256, in_f=320):
super(ASPPMnet, self).__init__()
if dilation_series is None:
dilation_series = [6, 12, 18]
padding_series = dilation_series
self.bnorm = nn.BatchNorm2d
kwargs = {"num_features": out_f, "affine": True}
# Reduce features in order to apply depth-wise convolutions
self.conv_reduce = nn.Sequential(nn.Conv2d(in_f, out_f, kernel_size=1, stride=1, bias=False),
self.bnorm(**kwargs),
nn.ReLU6(inplace=True))
# List of parallel convolutions
self.conv2d_list = nn.ModuleList()
# 1x1 convolution
self.conv2d_list.append(nn.Sequential(nn.Conv2d(out_f, out_f, kernel_size=1, stride=1,
bias=False, groups=out_f),
self.bnorm(**kwargs),
nn.ReLU6(inplace=True)))
# Dilated Convolutions
for dilation, padding in zip(dilation_series, padding_series):
self.conv2d_list.append(nn.Sequential(nn.Conv2d(out_f, out_f, kernel_size=3, stride=1, padding=padding,
dilation=dilation, bias=False, groups=out_f),
self.bnorm(**kwargs),
nn.ReLU6(inplace=True)))
# Global features
self.conv2d_list.append(nn.Sequential(nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Conv2d(out_f, out_f, kernel_size=1, stride=1,
bias=False, groups=out_f),
self.bnorm(**kwargs),
nn.ReLU6(inplace=True)))
self.conv2d_final = nn.Sequential(nn.Conv2d(out_f * 5, out_f, kernel_size=1,
stride=1, bias=False, groups=out_f),
self.bnorm(**kwargs),
nn.ReLU6(inplace=True))
def forward(self, x):
h, w = x.size(2), x.size(3)
# Reduce
x = self.conv_reduce(x)
# ASPP
interm = []
for i in range(len(self.conv2d_list)):
interm.append(self.conv2d_list[i](x))
# Upsample the global features
interm[-1] = F.interpolate(input=interm[-1], size=(h, w), mode='bilinear', align_corners=False)
# Concatenate the parallel streams
out = torch.cat(interm, dim=1)
# Final convolutional layer of the classifier
out = self.conv2d_final(out)
return out
class ASPPDecoderMnet(nn.Module):
"""
ASPP-v3 decoder for Mobilenet
"""
def __init__(self,
n_classes,
in_channels_high=320,
in_channels_low=24,
out_f_classifier=256,
atrous_rates=None,
up=4,
):
super(ASPPDecoderMnet, self).__init__()
print('Initializing Mobilenet ASPP v3 Decoder')
if atrous_rates is None:
atrous_rates = [6, 12, 18]
kwargs_out = {"num_features": out_f_classifier, "affine": True}
kwargs_low = {"num_features": 48, "affine": True}
self.up = up
self.norm = nn.BatchNorm2d
print('Initializing classifier: ASPP with global features (Deeplab-v3+)')
self.layer5 = ASPPMnet(in_f=in_channels_high,
out_f=out_f_classifier,
dilation_series=atrous_rates)
self.low_level_reduce = nn.Sequential(nn.Conv2d(in_channels_low, 48, kernel_size=1,
stride=1, bias=False, groups=2),
self.norm(**kwargs_low),
nn.ReLU6(inplace=True))
self.conv_concat = nn.Sequential(nn.Conv2d(out_f_classifier + 48, out_f_classifier, kernel_size=3, padding=1,
stride=1, bias=False, groups=math.gcd(304, 256)),
self.norm(**kwargs_out),
nn.ReLU6(inplace=True))
self.conv_process = nn.Sequential(conv3x3_mnet(out_f_classifier),
self.norm(**kwargs_out),
nn.ReLU6(inplace=True))
self.conv_predict = nn.Conv2d(out_f_classifier, n_classes, kernel_size=1, bias=True)
def forward(self, x_low, x):
x_low = self.low_level_reduce(x_low)
x = self.layer5(x)
x = F.interpolate(x, scale_factor=self.up, mode='bilinear', align_corners=False)
x = torch.cat((x_low, x), dim=1)
x = self.conv_concat(x)
x = self.conv_process(x)
features = x
x = self.conv_predict(x)
return x, features
class InvResidualCommon(nn.Module):
"""Common Inverted Residual block for Mobilenet
"""
def __init__(self, hidden_dim, oup, stride, dilation=1, train_norm_layers=False):
super(InvResidualCommon, self).__init__()
self.conv1 = nn.Conv2d(hidden_dim, hidden_dim, 3, stride, padding=dilation,
groups=hidden_dim, bias=False, dilation=dilation)
self.bn1 = nn.BatchNorm2d(hidden_dim)
for x in self.bn1.parameters():
x.requires_grad = train_norm_layers
self.conv2 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn2 = nn.BatchNorm2d(oup)
for x in self.bn2.parameters():
x.requires_grad = train_norm_layers
self.relu = nn.ReLU6(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
return x
class InvResidualExpand(nn.Module):
"""Expanding inverted residual block for Mobilenet
"""
def __init__(self, inp, hidden_dim, oup, stride, dilation=1, train_norm_layers=False):
super(InvResidualExpand, self).__init__()
self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
for x in self.bn1.parameters():
x.requires_grad = train_norm_layers
self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, 3, stride, padding=dilation,
groups=hidden_dim, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(hidden_dim)
for x in self.bn2.parameters():
x.requires_grad = train_norm_layers
self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn3 = nn.BatchNorm2d(oup)
for x in self.bn3.parameters():
x.requires_grad = train_norm_layers
self.relu = nn.ReLU6(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, dilation=1, train_norm_layers=False):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = InvResidualCommon(hidden_dim=hidden_dim,
oup=oup,
stride=stride,
dilation=dilation,
train_norm_layers=train_norm_layers)
else:
self.conv = InvResidualExpand(inp=inp,
hidden_dim=hidden_dim,
oup=oup,
stride=stride,
dilation=dilation,
train_norm_layers=train_norm_layers)
def forward(self, x):
if self.use_res_connect:
out = self.conv(x)
return x + out
else:
out = self.conv(x)
return out
class MobileNetV2(nn.Module):
def __init__(self, n_classes, width_mult=1., output_stride=16, train_norm_layers=False,
nInputChannels=3, classifier='atrous-v3', sync_bnorm=False):
super(MobileNetV2, self).__init__()
self.train_norm_layers = train_norm_layers
if sync_bnorm:
raise NotImplementedError('Sync bnorm not implemented for mobilenet')
atrous_rates = [6, 12, 18]
if output_stride == 8:
dilations = (2, 4)
strides = (2, 2, 2, 1, 1)
atrous_rates = [x * 2 for x in atrous_rates]
elif output_stride == 16:
dilations = (1, 2)
strides = (2, 2, 2, 2, 1)
else:
raise ValueError('Choose between output_stride 8 and 16')
block = InvertedResidual
input_channel = 32
inverted_residual_setting = [
# t, c, n, s, dilation
[1, 16, 1, 1, 1],
[6, 24, 2, strides[1], 1],
[6, 32, 3, strides[2], 1],
[6, 64, 4, strides[3], dilations[0]],
[6, 96, 3, 1, dilations[0]],
[6, 160, 3, strides[4], dilations[1]],
[6, 320, 1, 1, dilations[1]],
]
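# (Added comment) Each row above is [t, c, n, s, dilation]: expansion ratio t,
# output channels c, number of repeated blocks n, stride s of the first block,
# and the dilation applied so the backbone honours the requested output_stride.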
input_channel = int(input_channel * width_mult)
# build first layer of low level features
self.features = [conv_bn(nInputChannels, input_channel, strides[0])]
# build inverted residual blocks
for t, c, n, s, dil in inverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s,
expand_ratio=t,
dilation=dil,
train_norm_layers=train_norm_layers))
else:
self.features.append(block(input_channel, output_channel, 1,
expand_ratio=t,
dilation=dil,
train_norm_layers=train_norm_layers))
input_channel = output_channel
self.features = nn.Sequential(*self.features)
self.features_low = self.features[:4]
self.features_high = self.features[4:]
if classifier == 'atrous-v3':
self.decoder = ASPPDecoderMnet(n_classes=n_classes,
in_channels_high=320,
out_f_classifier=256,
atrous_rates=atrous_rates)
else:
raise NotImplementedError('Implemented classifier: atrous-v3')
self._initialize_weights()
self._verify_bnorm_params()
def forward(self, x):
in_shape = x.shape[2:]
x_low = self.features_low(x)
x = self.features_high(x_low)
x, _ = self.decoder(x_low, x)
x = F.interpolate(x, size=(in_shape[-2], in_shape[-1]), mode='bilinear', align_corners=False)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def _verify_bnorm_params(self):
verify_trainable = True
a = 0
for x in self.modules():
if isinstance(x, nn.BatchNorm2d):
for y in x.parameters():
verify_trainable = (verify_trainable and y.requires_grad)
a += isinstance(x, nn.BatchNorm2d)
print("\nVerification: Trainable batchnorm parameters? Answer: {}\n".format(verify_trainable))
print("Asynchronous bnorm layers: {}".format(a))
@staticmethod
def _define_if_copyable(module):
is_copyable = isinstance(module, nn.Conv2d) \
or isinstance(module, nn.Linear) \
or isinstance(module, nn.BatchNorm2d)
return is_copyable
def load_pretrained(self, base_network):
copy_trg = {}
for (name_trg, module_trg) in self.named_modules():
if self._define_if_copyable(module_trg):
copy_trg[name_trg] = module_trg
copy_src = {}
for (name_src, module_src) in base_network.named_modules():
if self._define_if_copyable(module_src):
copy_src[name_src] = module_src
mapping = {}
for name_trg in copy_trg:
if 'decoder' in name_trg:
continue
elif 'features.' in name_trg:
mapping[name_trg] = name_trg
for name_trg in mapping:
map_trg = mapping[name_trg]
if '.conv1' in name_trg:
map_trg = map_trg.replace('.conv1', '.0')
elif '.bn1' in name_trg:
map_trg = map_trg.replace('.bn1', '.1')
elif '.conv2' in name_trg:
map_trg = map_trg.replace('.conv2', '.3')
elif '.bn2' in name_trg:
map_trg = map_trg.replace('.bn2', '.4')
elif '.conv3' in name_trg:
map_trg = map_trg.replace('.conv3', '.6')
elif '.bn3' in name_trg:
map_trg = map_trg.replace('.bn3', '.7')
mapping[name_trg] = map_trg
i = 0
for name in mapping:
module_trg = copy_trg[name]
module_src = copy_src[mapping[name]]
if module_trg.weight.data.shape != module_src.weight.data.shape:
print('skipping layer with size: {} and target size: {}'
.format(module_trg.weight.data.shape, module_src.weight.data.shape))
continue
if isinstance(module_trg, nn.Conv2d) and isinstance(module_src, nn.Conv2d):
module_trg.weight.data = module_src.weight.data.clone()
if module_src.bias is not None:
module_trg.bias = module_src.bias.clone()
i += 1
elif isinstance(module_trg, nn.BatchNorm2d) and isinstance(module_src, nn.BatchNorm2d):
# copy running mean and variance of batchnorm layers!
module_trg.running_mean.data = module_src.running_mean.data.clone()
module_trg.running_var.data = module_src.running_var.data.clone()
module_trg.weight.data = module_src.weight.data.clone()
module_trg.bias.data = module_src.bias.data.clone()
i += 1
elif isinstance(module_trg, nn.Linear) and (isinstance(module_src, nn.Linear)):
module_trg.weight.data = module_src.weight.data.clone()
module_trg.bias.data = module_src.bias.data.clone()
i += 1
print('\nContents of {} out of {} layers successfully copied\n'
.format(i, len(mapping)))
def get_lr_params(model, part='all'):
"""
This generator returns all the parameters of the network
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
if part == 'all':
b = [model]
elif part == 'backbone':
b = [model.features]  # self.features already contains the low-level blocks
elif part == 'decoder':
b = [model.decoder]
else:
raise ValueError('Unknown part: {}'.format(part))
for i in range(len(b)):
for name, k in b[i].named_parameters():
if k.requires_grad:
yield k
def mobilenet_v2(pretrained='scratch', **kwargs):
model = MobileNetV2(**kwargs)
if pretrained == 'imagenet':
print('loading pre-trained imagenet model')
model_full = mobilenet_v2_imagenet.mobilenet_v2(pretrained=True)
model.load_pretrained(model_full)
elif pretrained == 'coco':
print('loading pre-trained COCO model')
# Load checkpoint
checkpoint = torch.load(
os.path.join(Path.models_dir(), 'mobilenet_v2_coco_80.pth'), map_location=lambda storage, loc: storage)
# handle dataparallel
if 'module.' in list(checkpoint.keys())[0]:
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k.replace('module.', '') # remove `module.`
new_state_dict[name] = v
else:
new_state_dict = checkpoint
# Load pre-trained IN model
model.load_state_dict(new_state_dict)
elif pretrained == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('select either imagenet or scratch for pre-training')
return model
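# Usage sketch (added, not part of the original file), mirroring test_flops()
# below:
#   net = mobilenet_v2(pretrained='imagenet', n_classes=21, output_stride=16,
#                      train_norm_layers=True)
#   out = net(torch.randn(2, 3, 512, 512))   # -> shape (2, 21, 512, 512)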
def test_flops():
from fblib.util.model_resources.flops import compute_gflops
net = mobilenet_v2(n_classes=21, pretrained='imagenet',
output_stride=16, train_norm_layers=True)
print('GFLOPS: {}'.format(compute_gflops(net, (2, 3, 512, 512))))
def test_visualize_network():
import fblib.util.pdf_visualizer as viz
net = mobilenet_v2(n_classes=21, pretrained='imagenet',
output_stride=16, train_norm_layers=True)
net.eval()
x = torch.randn(1, 3, 512, 512)
x.requires_grad_()
# pdf visualizer
y = net.forward(x)
g = viz.make_dot(y, net.state_dict())
g.view(directory='.')
if __name__ == '__main__':
test_flops()
# test_visualize_network()
| astmt-master | fblib/networks/deeplab_single_task/deeplab_mobilenet_v2.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
from torch.nn import functional as F
import torch
affine_par = True
class AtrousSpatialPyramidPoolingModule(nn.Module):
"""
Atrous Spatial Pyramid Pooling Module (DeepLab-v3+)
"""
def __init__(self, dilation_series=[6, 12, 18], depth=256, in_f=2048, groupnorm=False, sync_bnorm=False,
cardinality=1, exist_decoder=True):
super(AtrousSpatialPyramidPoolingModule, self).__init__()
padding_series = dilation_series
self.conv2d_list = nn.ModuleList()
self.bnorm = nn.BatchNorm2d
if not groupnorm:
NormModule = self.bnorm
kwargs = {"num_features": depth, "affine": affine_par}
else:
NormModule = nn.GroupNorm
kwargs = {"num_groups": 16, "num_channels": depth, "affine": affine_par}
# 1x1 convolution
self.conv2d_list.append(nn.Sequential(nn.Conv2d(in_f, depth, kernel_size=1, stride=1, bias=False),
NormModule(**kwargs),
nn.ReLU(inplace=True)))
for dilation, padding in zip(dilation_series, padding_series):
self.conv2d_list.append(nn.Sequential(nn.Conv2d(in_f, depth, kernel_size=3, stride=1, padding=padding,
dilation=dilation, bias=False, groups=cardinality),
NormModule(**kwargs),
nn.ReLU(inplace=True)))
# Global features
self.conv2d_list.append(nn.Sequential(nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Conv2d(in_f, depth, kernel_size=1, stride=1, bias=False),
NormModule(**kwargs),
nn.ReLU(inplace=True)))
if exist_decoder:
self.conv2d_final = nn.Sequential(nn.Conv2d(depth * 5, depth, kernel_size=1, stride=1, bias=False),
NormModule(**kwargs),
nn.ReLU(inplace=True))
else:
self.conv2d_final = nn.Sequential(nn.Conv2d(depth * 5, depth, kernel_size=1, stride=1, bias=True))
for m in self.conv2d_list.modules():  # descend into each Sequential branch so its convs/norms are initialized
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, self.bnorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
for m in self.conv2d_final:
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, self.bnorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
h, w = x.size(2), x.size(3)
interm = []
for i in range(len(self.conv2d_list)):
interm.append(self.conv2d_list[i](x))
# Upsample the global features
interm[-1] = F.interpolate(input=interm[-1], size=(h, w), mode='bilinear', align_corners=False)
# Concatenate the parallel streams
out = torch.cat(interm, dim=1)
# Final convolutional layer of the classifier
out = self.conv2d_final(out)
return out
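# Usage sketch (added, not part of the original file): with the defaults the
# module maps a ResNet C5 feature map to a 256-channel tensor of the same
# spatial size, e.g.
#   aspp = AtrousSpatialPyramidPoolingModule(depth=256, in_f=2048)
#   out = aspp(torch.randn(2, 2048, 32, 32))   # -> shape (2, 256, 32, 32)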
class AtrousPyramidModule(nn.Module):
"""
Atrous Pyramid Module (DeepLab-v2)
"""
def __init__(self, dilation_series, padding_series, n_classes, in_f=2048):
super(AtrousPyramidModule, self).__init__()
self.conv2d_list = nn.ModuleList()
for dilation, padding in zip(dilation_series, padding_series):
self.conv2d_list.append(nn.Conv2d(in_f, n_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias=True))
for m in self.conv2d_list:
m.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.conv2d_list[0](x)
for i in range(len(self.conv2d_list)-1):
out += self.conv2d_list[i+1](x)
return out
class PSPModule(nn.Module):
"""
Pyramid Scene Parsing Module (PSP Net)
"""
def __init__(self, in_features=2048, out_features=512, sizes=(1, 2, 3, 6), n_classes=1,
static_graph=False, groupnorm=False, sync_bnorm=False):
super(PSPModule, self).__init__()
self.groupnorm = groupnorm
self.stages = []
self.static_graph = static_graph
self.stages = nn.ModuleList([self._make_stage_1(in_features, size) for size in sizes])
self.bottleneck = self._make_stage_2(in_features * (len(sizes)//4 + 1), out_features)
self.n_classes = n_classes
self.bnorm = nn.BatchNorm2d
if self.n_classes > 0:
self.final = nn.Conv2d(out_features, n_classes, kernel_size=1)
def _make_stage_1(self, in_features, size):
if self.static_graph:
# For input_image = 256
# kernel_size = {1: 32, 2: 16, 3: 10, 6: 5}
# For input_image = 512 The stride for level 6 is not the same as in AdaptiveAvgPool2d
kernel_stride_size = {1: [64, 64], 2: [32, 32], 3: [22, 21], 6: [11, 9]}
prior = nn.AvgPool2d(kernel_size=kernel_stride_size[size][0], stride=kernel_stride_size[size][1])
else:
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(in_features, in_features//4, kernel_size=1, bias=False)
if not self.groupnorm:
bn = self.bnorm(num_features=in_features//4, affine=affine_par)
else:
bn = nn.GroupNorm(num_groups=16, num_channels=in_features//4, affine=affine_par)
relu = nn.ReLU(inplace=True)
return nn.Sequential(prior, conv, bn, relu)
def _make_stage_2(self, in_features, out_features):
conv = nn.Conv2d(in_features, out_features, kernel_size=1, bias=False)
if not self.groupnorm:
bn = self.bnorm(num_features=out_features, affine=affine_par)
else:
bn = nn.GroupNorm(num_groups=32, num_channels=out_features, affine=affine_par)
relu = nn.ReLU(inplace=True)
return nn.Sequential(conv, bn, relu)
def forward(self, feats):
h, w = feats.size(2), feats.size(3)
priors = [F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=False)
for stage in self.stages]
priors.append(feats)
bottle = self.bottleneck(torch.cat(priors, 1))
if self.n_classes > 0:
out = self.final(bottle)
else:
out = bottle
return out
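# Usage sketch (added, not part of the original file): with n_classes > 0 the
# module returns class scores, otherwise the 512-channel bottleneck features,
# e.g.
#   psp = PSPModule(in_features=2048, out_features=512, n_classes=21)
#   out = psp(torch.randn(2, 2048, 64, 64))   # -> shape (2, 21, 64, 64)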
| astmt-master | fblib/networks/deeplab_single_task/classifiers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import math
from copy import deepcopy
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
import torchvision.models.resnet as resnet
from fblib.util.mypath import Path
try:
from itertools import izip
except ImportError: # python3.x
izip = zip
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['resnet18', 'resnet34', 'resnet26', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet26': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/resnet26-c36cf79a7.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet_C5(nn.Module):
def __init__(self, block, layers):
self.inplanes = 64
super(ResNet_C5, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.output = nn.Conv2d(512 * block.expansion, 1, kernel_size=1, stride=1, padding=0)  # 2048 for Bottleneck, 512 for BasicBlock
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
input_size = x.size()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.output(x)
x = upsample(x, size=(input_size[2], input_size[3]), mode='bilinear')
return x
def load_pretrained(self, base_network):
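        """Copy Conv2d and BatchNorm2d parameters (including batchnorm running statistics)
        from base_network into this model, skipping layers whose shapes do not match."""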
for module, module_ori in izip(self.modules(), base_network.modules()):
if isinstance(module, nn.Conv2d) and isinstance(module_ori, nn.Conv2d):
if module.weight.data.shape == module_ori.weight.data.shape:
module.weight.data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
else:
print('Skipping Conv layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, nn.BatchNorm2d) and isinstance(module_ori, nn.BatchNorm2d):
if module.weight.data.shape == module_ori.weight.data.shape:
# Copy running mean and variance of batchnorm layers!
module.running_mean.data = deepcopy(module_ori.running_mean.data)
module.running_var.data = deepcopy(module_ori.running_var.data)
module.weight.data = deepcopy(module_ori.weight)
module.bias.data = deepcopy(module_ori.bias.data)
else:
print('Skipping Batchnorm layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
    model = ResNet_C5(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        model_full = resnet.resnet18(pretrained=True)
        model.load_pretrained(model_full)
return model
def resnet26(pretrained=False, remote=True):
"""Constructs a ResNet-26 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_C5(Bottleneck, [2, 2, 2, 2])
if pretrained:
# Define ResNet26 ImageNet
model_IN = resnet.ResNet(block=Bottleneck, layers=[2, 2, 2, 2], num_classes=1000)
# Load checkpoint
if remote:
checkpoint = load_state_dict_from_url(model_urls['resnet26'], map_location='cpu', progress=True)
else:
checkpoint = torch.load(
os.path.join(Path.models_dir(), 'resnet26.pth'), map_location=lambda storage, loc: storage)
checkpoint = checkpoint['model_state']
# Handle DataParallel
if 'module.' in list(checkpoint.keys())[0]:
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k.replace('module.', '') # remove `module.`
new_state_dict[name] = v
else:
new_state_dict = checkpoint
# Load pre-trained IN model
model_IN.load_state_dict(new_state_dict)
# Load weights to dense-labelling network
model.load_pretrained(model_IN)
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
    model = ResNet_C5(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model_full = resnet.resnet34(pretrained=True)
        model.load_pretrained(model_full)
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
    model = ResNet_C5(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model_full = resnet.resnet50(pretrained=True)
        model.load_pretrained(model_full)
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
    model = ResNet_C5(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model_full = resnet.resnet101(pretrained=True)
        model.load_pretrained(model_full)
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
    model = ResNet_C5(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model_full = resnet.resnet152(pretrained=True)
        model.load_pretrained(model_full)
return model
if __name__ == '__main__':
net = resnet26(pretrained=True)
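    # Minimal sanity check (illustrative; the input size is arbitrary): the network outputs a
    # single-channel map at the input resolution, so the printed shape should be (1, 1, 512, 512)
    net.eval()
    with torch.no_grad():
        print(net(torch.rand(1, 3, 512, 512)).shape)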
| astmt-master | fblib/networks/deeplab_single_task/resnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import functional as F
import fblib.networks.classification.resnext as resnext_imagenet
from fblib.networks.deeplab_single_task.classifiers import PSPModule, AtrousPyramidModule, AtrousSpatialPyramidPoolingModule
from fblib.layers.misc_layers import interp_surgery
try:
from itertools import izip
except ImportError: # python3.x
izip = zip
affine_par = True # Trainable Batchnorm for the classifier
def outS(i):
    """Compute the spatial output size of the backbone for input size i after three stride-2 stages (overall stride 8)."""
    i = int(i)
    i = int((i + 1) / 2)
    i = int(np.ceil((i + 1) / 2.0))
    i = int((i + 1) / 2)
    return i
def conv3x3(in_planes, out_planes, stride=1, dilation=1, cardinality=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False, groups=cardinality)
class ResNeXtBottleneck(nn.Module):
expansion = 4
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, inplanes, planes, base_width=4, stride=1, downsample=None,
dilation=1, train_norm_layers=False, sync_bnorm=False, cardinality=32):
super(ResNeXtBottleneck, self).__init__()
D = int(math.floor(planes * (base_width / 64.0)))
C = cardinality
padding = dilation
self.bnorm = nn.BatchNorm2d
self.conv_reduce = nn.Conv2d(inplanes, D * C, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = self.bnorm(D * C, affine=affine_par)
for i in self.bn_reduce.parameters():
i.requires_grad = train_norm_layers
self.conv_conv = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=padding, groups=cardinality,
bias=False, dilation=dilation)
self.bn = self.bnorm(D * C, affine=affine_par)
for i in self.bn.parameters():
i.requires_grad = train_norm_layers
self.conv_expand = nn.Conv2d(D * C, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = self.bnorm(planes * 4, affine=affine_par)
for i in self.bn_expand.parameters():
i.requires_grad = train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
bottleneck = self.conv_reduce(x)
bottleneck = self.relu(self.bn_reduce(bottleneck))
bottleneck = self.conv_conv(bottleneck)
bottleneck = self.relu(self.bn(bottleneck))
bottleneck = self.conv_expand(bottleneck)
bottleneck = self.bn_expand(bottleneck)
if self.downsample is not None:
residual = self.downsample(x)
return self.relu(residual + bottleneck)
class ResNeXt(nn.Module):
def __init__(self, block, layers, n_classes, nInputChannels=3, classifier="atrous",
output_stride=16, decoder=True, train_norm_layers=False,
sync_bnorm=False, cardinality=32):
super(ResNeXt, self).__init__()
print("Constructing ResNeXt model...")
print("Output stride: {}".format(output_stride))
print("Number of classes: {}".format(n_classes))
print("Number of Input Channels: {}".format(nInputChannels))
print("Cardinality: {}".format(cardinality))
v3_atrous_rates = [6, 12, 18]
if output_stride == 8:
dilations = (2, 4)
strides = (2, 2, 2, 1, 1)
v3_atrous_rates = [x * 2 for x in v3_atrous_rates]
elif output_stride == 16:
dilations = (1, 2)
strides = (2, 2, 2, 2, 1)
else:
raise ValueError('Choose between output_stride 8 and 16')
self.cardinality = cardinality
self.inplanes = 64
self.classifier = classifier
self.decoder = decoder
self.train_norm_layers = train_norm_layers
self.sync_bnorm = sync_bnorm
self.bnorm = nn.BatchNorm2d
# Layer structure
self.conv1 = nn.Conv2d(nInputChannels, 64, kernel_size=7, stride=strides[0], padding=3,
bias=False)
self.bn1 = self.bnorm(64, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = self.train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=strides[1], padding=1, ceil_mode=False)
self.layer1 = self._make_layer(block, 64, layers[0],
train_norm_layers=train_norm_layers)
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[2],
train_norm_layers=self.train_norm_layers)
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[3], dilation=dilations[0],
train_norm_layers=self.train_norm_layers)
self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[4], dilation=dilations[1],
train_norm_layers=self.train_norm_layers)
in_f, out_f = 2048, 512
if decoder:
print('Using decoder')
if classifier == "atrous":
print('Initializing classifier: old atrous pyramid')
out_f_classifier = 256
self.layer5 = AtrousPyramidModule(dilation_series=[6, 12, 18, 24], padding_series=[6, 12, 18, 24],
n_classes=out_f_classifier, in_f=in_f)
elif classifier == "psp":
print('Initializing classifier: PSP')
out_f_classifier = 256
self.layer5 = PSPModule(in_features=in_f, out_features=out_f_classifier, sizes=(1, 2, 3, 6),
n_classes=0, sync_bnorm=self.sync_bnorm)
elif classifier == 'atrous-v3':
print('Initializing classifier: A-trous with global features (Deeplab-v3+)')
out_f_classifier = 256
self.layer5 = AtrousSpatialPyramidPoolingModule(depth=out_f_classifier,
dilation_series=v3_atrous_rates,
sync_bnorm=self.sync_bnorm,
cardinality=self.cardinality)
else:
out_f_classifier = out_f
self.layer5 = None
NormModule = self.bnorm
kwargs_low = {"num_features": 48, "affine": affine_par}
kwargs_out = {"num_features": 256, "affine": affine_par}
self.low_level_reduce = nn.Sequential(
nn.Conv2d(256, 48, kernel_size=1, bias=False),
NormModule(**kwargs_low),
nn.ReLU(inplace=True)
)
self.concat_and_predict = nn.Sequential(
conv3x3(out_f_classifier + 48, 256),
NormModule(**kwargs_out),
nn.ReLU(inplace=True),
conv3x3(256, 256, cardinality=self.cardinality),
NormModule(**kwargs_out),
nn.ReLU(inplace=True),
# final layer
nn.Conv2d(256, n_classes, kernel_size=1, bias=True)
)
else:
if classifier == "atrous":
print('Initializing classifier: A-trous pyramid')
self.layer5 = AtrousPyramidModule(dilation_series=[6, 12, 18, 24], padding_series=[6, 12, 18, 24],
n_classes=n_classes, in_f=in_f)
elif classifier == "psp":
print('Initializing classifier: PSP')
self.layer5 = PSPModule(in_features=in_f, out_features=out_f, sizes=(1, 2, 3, 6), n_classes=n_classes,
sync_bnorm=self.sync_bnorm)
elif classifier == "atrous-v3":
print('Initializing classifier: A-trous with global features (Deeplab-v3+)')
self.layer5 = AtrousSpatialPyramidPoolingModule(depth=n_classes, in_f=in_f,
dilation_series=v3_atrous_rates,
sync_bnorm=self.sync_bnorm)
else:
self.layer5 = None
# Initialize weights
self._initialize_weights()
# Check if batchnorm parameters are trainable
self._verify_bnorm_params()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, train_norm_layers=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.bnorm(planes * block.expansion, affine=affine_par),
)
# Train batchnorm?
for i in downsample._modules['1'].parameters():
i.requires_grad = train_norm_layers
layers = []
layers.append(block(self.inplanes, planes, stride=stride, dilation=dilation,
downsample=downsample, train_norm_layers=train_norm_layers, sync_bnorm=self.sync_bnorm,
cardinality=self.cardinality))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, train_norm_layers=train_norm_layers,
sync_bnorm=self.sync_bnorm, cardinality=self.cardinality))
return nn.Sequential(*layers)
def _verify_bnorm_params(self):
verify_trainable = True
a = 0
for x in self.modules():
if isinstance(x, nn.BatchNorm2d):
for y in x.parameters():
verify_trainable = (verify_trainable and y.requires_grad)
a += isinstance(x, nn.BatchNorm2d)
print("\nVerification: Trainable batchnorm parameters? Answer: {}\n".format(verify_trainable))
print("Asynchronous bnorm layers: {}".format(a))
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, self.bnorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.ConvTranspose2d):
m.weight.data.zero_()
m.weight.data = interp_surgery(m)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def forward(self, x, bbox=None):
h, w = x.shape[2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if self.decoder:
x_low = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
if self.decoder:
x = F.interpolate(x, size=(x_low.shape[2], x_low.shape[3]), mode='bilinear', align_corners=False)
x_low = self.low_level_reduce(x_low)
x = torch.cat([x, x_low], dim=1)
x = self.concat_and_predict(x)
x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=False)
return x
def load_pretrained(self, base_network, nInputChannels=3):
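        """Copy ImageNet weights from base_network layer by layer. When nInputChannels > 3,
        the first convolution is extended by replicating the weights of its last input
        channel for every extra input channel."""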
flag = 0
i = 0
for module, module_ori in izip(self.modules(), base_network.modules()):
if isinstance(module, nn.Conv2d) and isinstance(module_ori, nn.Conv2d):
if not flag and nInputChannels != 3:
module.weight[:, :3, :, :].data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
                    for c in range(3, int(module.weight.data.shape[1])):
                        module.weight[:, c, :, :].data = deepcopy(
                            module_ori.weight[:, -1, :, :][:, np.newaxis, :, :].data)
flag = 1
i += 1
elif module.weight.data.shape == module_ori.weight.data.shape:
i += 1
module.weight.data = deepcopy(module_ori.weight.data)
module.bias = deepcopy(module_ori.bias)
else:
print('Skipping Conv layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
elif isinstance(module, self.bnorm) and \
(isinstance(module_ori, nn.BatchNorm2d) or isinstance(module_ori, self.bnorm)):
if module.weight.data.shape == module_ori.weight.data.shape:
i += 1
# Copy running mean and variance of batchnorm layers!
module.running_mean.data = deepcopy(module_ori.running_mean.data)
module.running_var.data = deepcopy(module_ori.running_var.data)
module.weight.data = deepcopy(module_ori.weight.data)
module.bias.data = deepcopy(module_ori.bias.data)
else:
print('Skipping Batchnorm layer with size: {} and target size: {}'
.format(module.weight.data.shape, module_ori.weight.data.shape))
print("Content of {} layers successfully copied.".format(i))
def get_lr_params(model):
"""
    This generator returns all trainable parameters of the network except for the
    final classification layer. Batchnorm parameters are yielded only if they were
    left trainable (train_norm_layers=True); otherwise their requires_grad is False
    and they are skipped.
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4, model.layer5]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.low_level_reduce, model.concat_and_predict])
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_1x_lr_params(model):
"""
    This generator returns the trainable parameters of the backbone (plus the
    low-level reduction layer of the decoder, if present). Batchnorm parameters
    are yielded only if they were left trainable (train_norm_layers=True).
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.low_level_reduce])
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k
def get_10x_lr_params(model):
"""
    This generator returns the parameters of the final classification layers,
    which classify each pixel into one of the output classes
"""
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
b = [model.layer5]
# If decoder exists
if 'low_level_reduce' in model._modules.keys():
b.extend([model.concat_and_predict])
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
return base_lr * ((1 - float(iter_) / max_iter) ** power)
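# Illustrative usage (not part of the original code): the parameter generators above are typically
# combined into optimizer groups with a 10x learning rate for the classifier, and lr_poly() is used
# to decay the base learning rate; variable names below (base_lr, iter_, max_iter) are placeholders.
#   optimizer = torch.optim.SGD([{'params': get_1x_lr_params(net), 'lr': base_lr},
#                                {'params': get_10x_lr_params(net), 'lr': 10 * base_lr}],
#                               momentum=0.9, weight_decay=1e-4)
#   lr = lr_poly(base_lr, iter_, max_iter)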
def resnext50(n_classes, pretrained=None, nInputChannels=3, cardinality=32, **kwargs):
"""Constructs a ResNeXt-50 model.
Args:
        pretrained (str): 'imagenet' loads a model pre-trained on ImageNet, 'scratch' trains from scratch
"""
model = ResNeXt(ResNeXtBottleneck, layers=[3, 4, 6, 3], n_classes=n_classes,
nInputChannels=nInputChannels, cardinality=cardinality, **kwargs)
if pretrained == 'imagenet':
model_full = resnext_imagenet.resnext50_32x4d(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif pretrained == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Select imagenet or scratch for pre-training')
return model
def resnext101(n_classes, pretrained=False, nInputChannels=3, cardinality=32, **kwargs):
"""Constructs a ResNeXt-101 model.
Args:
pretrained ('scratch', 'imagenet'): Select model trained on respective dataset.
"""
model = ResNeXt(ResNeXtBottleneck, layers=[3, 4, 23, 3], n_classes=n_classes,
nInputChannels=nInputChannels, cardinality=cardinality, **kwargs)
if pretrained == 'imagenet':
model_full = resnext_imagenet.resnext101_32x4d(pretrained=True)
model.load_pretrained(model_full, nInputChannels=nInputChannels)
elif pretrained == 'scratch':
print('Training from scratch')
else:
raise NotImplementedError('Select imagenet or scratch for pre-training')
return model
def test_flops():
from fblib.util.model_resources.flops import compute_gflops
net = resnext50(n_classes=21, pretrained='imagenet', classifier="atrous-v3",
output_stride=16, decoder=True, train_norm_layers=True, sync_bnorm=False)
print('GFLOPS: {}'.format(compute_gflops(net, (2, 3, 256, 256))))
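    # Also report the raw parameter count (plain PyTorch; no extra dependencies assumed)
    n_params = sum(p.numel() for p in net.parameters())
    print('Parameters: {:.2f}M'.format(n_params / 1e6))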
if __name__ == '__main__':
test_flops()
| astmt-master | fblib/networks/deeplab_single_task/deeplab_resnext.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
from fblib.util.custom_container import SequentialMultiTask
import fblib.networks.classification.se_mobilenet_v2 as se_mobilenet_v2_imagenet
from fblib.networks.deeplab_multi_task.discriminators import FullyConvDiscriminator
from fblib.layers.reverse_grad import ReverseLayerF
def conv3x3_mnet(planes, stride=1, dilation=1):
"""3x3 depth-wiseconvolution with padding"""
return nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False,
groups=planes)
class ConvBNMTL(nn.Module):
"""Simple 3x3 convolution, batchnorm and relu for MobileNet"""
def __init__(self, inp, oup, stride):
super(ConvBNMTL, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True))
def forward(self, x, task=None):
return self.conv(x)
class SEMobileMultiTaskDict(nn.Module):
"""SE for multiple tasks, for MobileNet"""
def __init__(self, channel, reduction=4, tasks=None):
super(SEMobileMultiTaskDict, self).__init__()
self.tasks = tasks
self.avg_pool = nn.AdaptiveAvgPool2d(1)
if self.tasks is None:
self.fc = nn.Sequential(nn.Linear(channel, channel // reduction),
nn.ReLU6(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid())
else:
print('Initializing Mobile Squeeze and Excitation modules:')
self.fc = nn.ModuleDict()
for task in self.tasks:
print('SE Mobile for task: {}'.format(task))
self.fc[task] = SequentialMultiTask(nn.Linear(channel, channel // reduction),
nn.ReLU6(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid())
def forward(self, x, task=None):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
if self.tasks:
y = self.fc[task](y).view(b, c, 1, 1)
else:
y = self.fc(y).view(b, c, 1, 1)
return x * y
class ConvCoupledSEMnet(nn.Module):
"""
SE-layer per task, coupled with convolutions and batchnorm.
"""
def __init__(self, tasks,
process_layers=None,
norm_kwargs=None,
norm_per_task=False,
squeeze=False,
reduction=4):
super(ConvCoupledSEMnet, self).__init__()
self.norm_per_task = norm_per_task
self.squeeze = squeeze
if not isinstance(process_layers, list):
process_layers = [process_layers]
self.process = nn.Sequential(*process_layers)
if self.squeeze:
print('Initializing SE on decoder')
self.se = SEMobileMultiTaskDict(process_layers[-1].out_channels, tasks=tasks, reduction=reduction)
if self.norm_per_task:
print('Initializing batchnorm per task on decoder')
self.norm = nn.ModuleDict({task: nn.BatchNorm2d(**norm_kwargs) for task in tasks})
else:
self.norm = nn.BatchNorm2d(**norm_kwargs)
self.relu = nn.ReLU6(inplace=True)
def forward(self, x, task):
x = self.process(x)
if self.norm_per_task:
x = self.norm[task](x)
else:
x = self.norm(x)
x = self.relu(x)
if self.squeeze:
x = self.se(x, task)
return x
class InvResidualCommon(nn.Module):
"""Common Inverted Residual block for Mobilenet
"""
def __init__(self, tasks, norm_per_task, hidden_dim, oup, stride, dilation=1):
super(InvResidualCommon, self).__init__()
self.norm_per_task = norm_per_task
self.conv1 = nn.Conv2d(hidden_dim, hidden_dim, 3, stride, padding=dilation,
groups=hidden_dim, bias=False, dilation=dilation)
if self.norm_per_task:
self.bn1 = nn.ModuleDict({task: nn.BatchNorm2d(hidden_dim) for task in tasks})
else:
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
if self.norm_per_task:
print('Initializing Batchnorm per task on encoder')
self.bn2 = nn.ModuleDict({task: nn.BatchNorm2d(oup) for task in tasks})
else:
self.bn2 = nn.BatchNorm2d(oup)
self.relu = nn.ReLU6(inplace=True)
def forward(self, x, task=None):
x = self.conv1(x)
if self.norm_per_task:
x = self.bn1[task](x)
else:
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
if self.norm_per_task:
x = self.bn2[task](x)
else:
x = self.bn2(x)
return x
class InvResidualExpand(nn.Module):
"""Expanding inverted residual block for Mobilenet
"""
def __init__(self, tasks, norm_per_task, inp, hidden_dim, oup, stride, dilation=1):
super(InvResidualExpand, self).__init__()
self.norm_per_task = norm_per_task
self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
if self.norm_per_task:
self.bn1 = nn.ModuleDict({task: nn.BatchNorm2d(hidden_dim) for task in tasks})
else:
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, 3, stride, padding=dilation,
groups=hidden_dim, bias=False, dilation=dilation)
if self.norm_per_task:
self.bn2 = nn.ModuleDict({task: nn.BatchNorm2d(hidden_dim) for task in tasks})
else:
self.bn2 = nn.BatchNorm2d(hidden_dim)
self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
if self.norm_per_task:
print('Initializing batchnorm per task on encoder')
self.bn3 = nn.ModuleDict({task: nn.BatchNorm2d(oup) for task in tasks})
else:
self.bn3 = nn.BatchNorm2d(oup)
self.relu = nn.ReLU6(inplace=True)
def forward(self, x, task=None):
x = self.conv1(x)
if self.norm_per_task:
x = self.bn1[task](x)
else:
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
if self.norm_per_task:
x = self.bn2[task](x)
else:
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
if self.norm_per_task:
x = self.bn3[task](x)
else:
x = self.bn3(x)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, tasks, dilation=1,
norm_per_task=False, use_modulation=False):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.norm_per_task = norm_per_task
hidden_dim = round(inp * expand_ratio)
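        # Use a skip connection only when the block preserves both spatial resolution and channel width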
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = InvResidualCommon(tasks=tasks,
norm_per_task=self.norm_per_task,
hidden_dim=hidden_dim,
oup=oup,
stride=stride,
dilation=dilation)
else:
self.conv = InvResidualExpand(tasks=tasks,
norm_per_task=self.norm_per_task,
inp=inp,
hidden_dim=hidden_dim,
oup=oup,
stride=stride,
dilation=dilation)
if use_modulation:
print('Initializing SE per task on encoder')
self.se = SEMobileMultiTaskDict(tasks=tasks, channel=oup, reduction=4)
else:
self.se = SEMobileMultiTaskDict(tasks=None, channel=oup, reduction=4)
def forward(self, x, task=None):
if self.use_res_connect:
out = self.conv(x, task)
out = self.se(out, task)
return x + out
else:
out = self.conv(x, task)
out = self.se(out, task)
return out
class ASPPMnet(nn.Module):
"""
Atrous Spatial Pyramid Pooling Module (DeepLab-v3+) for mobilenet
"""
def __init__(self, dilation_series=None, depth=64, in_f=320, tasks=None, squeeze=False,
norm_per_task=True):
super(ASPPMnet, self).__init__()
if dilation_series is None:
dilation_series = [6, 12, 18]
padding_series = dilation_series
self.bnorm = nn.BatchNorm2d
self.squeeze = squeeze
kwargs = {"num_features": depth, "affine": True}
self.conv2d_list = nn.ModuleList()
# 1x1 convolution
self.conv2d_list.append(
ConvCoupledSEMnet(tasks=tasks,
process_layers=nn.Conv2d(in_f, depth, kernel_size=1, stride=1, bias=False),
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze))
for dilation, padding in zip(dilation_series, padding_series):
self.conv2d_list.append(
ConvCoupledSEMnet(tasks=tasks,
process_layers=nn.Conv2d(in_f, depth, kernel_size=3, stride=1, padding=padding,
dilation=dilation, bias=False, groups=depth),
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze))
# Global features
self.conv2d_list.append(
ConvCoupledSEMnet(tasks=tasks,
process_layers=[nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Conv2d(in_f, depth, kernel_size=1, stride=1,
bias=False, groups=depth)],
norm_kwargs=kwargs,
norm_per_task=norm_per_task))
self.conv2d_final = ConvCoupledSEMnet(tasks=tasks,
process_layers=nn.Conv2d(depth * 5, depth, kernel_size=1,
stride=1, bias=False, groups=depth),
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze)
def forward(self, x, task=None):
h, w = x.size(2), x.size(3)
interm = []
for i in range(len(self.conv2d_list)):
interm.append(self.conv2d_list[i](x, task))
# Upsample the global features
interm[-1] = F.interpolate(input=interm[-1], size=(h, w), mode='bilinear', align_corners=False)
# Concatenate the parallel streams
out = torch.cat(interm, dim=1)
# Final convolutional layer of the classifier
out = self.conv2d_final(out, task)
return out
class ASPPDecoderMnet(nn.Module):
"""
ASPP-v3 decoder for Mobilenet
"""
def __init__(self,
tasks,
n_classes,
in_channels_high=320,
in_channels_low=24,
out_f_classifier=64,
atrous_rates=None,
norm_per_task=True,
squeeze=False,
up=4,
):
super(ASPPDecoderMnet, self).__init__()
print('Initializing Mobilenet ASPP v3 Decoder for multiple tasks')
if atrous_rates is None:
atrous_rates = [6, 12, 18]
out_f_low = int(48 * out_f_classifier / 256)
kwargs_out = {"num_features": out_f_classifier, "affine": True}
kwargs_low = {"num_features": out_f_low, "affine": True}
self.up = up
self.tasks = tasks
print('Initializing classifier: ASPP with global features (Deeplab-v3+)')
self.layer5 = ASPPMnet(in_f=in_channels_high,
depth=out_f_classifier,
dilation_series=atrous_rates,
tasks=self.tasks,
norm_per_task=norm_per_task,
squeeze=squeeze)
self.low_level_reduce = ConvCoupledSEMnet(tasks=tasks,
process_layers=nn.Conv2d(in_channels_low, out_f_low, kernel_size=1,
stride=1, bias=False,
groups=math.gcd(in_channels_low, out_f_low)),
norm_kwargs=kwargs_low,
norm_per_task=norm_per_task,
squeeze=squeeze)
self.conv_concat = ConvCoupledSEMnet(tasks=tasks,
process_layers=nn.Conv2d(out_f_classifier + out_f_low, out_f_classifier,
kernel_size=3, stride=1, bias=False, padding=1,
groups=math.gcd(out_f_classifier + out_f_low,
out_f_classifier)),
norm_kwargs=kwargs_out,
norm_per_task=norm_per_task,
squeeze=squeeze)
self.conv_process = ConvCoupledSEMnet(tasks=tasks,
process_layers=conv3x3_mnet(out_f_classifier),
norm_kwargs=kwargs_out,
norm_per_task=norm_per_task,
squeeze=squeeze)
self.conv_predict = nn.ModuleDict(
{task: nn.Conv2d(out_f_classifier, n_classes[task], kernel_size=1, bias=True) for task in tasks})
def forward(self, x_low, x, task=None):
x_low = self.low_level_reduce(x_low, task)
x = self.layer5(x, task)
x = F.interpolate(x, scale_factor=self.up, mode='bilinear', align_corners=False)
x = torch.cat((x_low, x), dim=1)
x = self.conv_concat(x, task)
x = self.conv_process(x, task)
features = x
x = self.conv_predict[task](x)
return x, features
class SEMobileNetV2(nn.Module):
def __init__(self, n_classes, width_mult=1., output_stride=16,
tasks=None, train_norm_layers=False, mod_enc=False, mod_dec=False,
use_dscr=False, dscr_k=1, dscr_d=2):
super(SEMobileNetV2, self).__init__()
self.use_dscr = use_dscr
self.norm_per_task_enc = train_norm_layers and tasks and mod_enc
self.norm_per_task_dec = train_norm_layers and tasks and mod_dec
self.tasks = tasks
self.task_dict = {x: i for i, x in enumerate(self.tasks)}
atrous_rates = [6, 12, 18]
if output_stride == 8:
dilations = (2, 4)
strides = (2, 2, 2, 1, 1)
atrous_rates = [x * 2 for x in atrous_rates]
elif output_stride == 16:
dilations = (1, 2)
strides = (2, 2, 2, 2, 1)
else:
raise ValueError('Choose between output_stride 8 and 16')
block = InvertedResidual
input_channel = 32
interverted_residual_setting = [
            # t, c, n, s, dilation
[1, 16, 1, 1, 1],
[6, 24, 2, strides[1], 1],
[6, 32, 3, strides[2], 1],
[6, 64, 4, strides[3], dilations[0]],
[6, 96, 3, 1, dilations[0]],
[6, 160, 3, strides[4], dilations[1]],
[6, 320, 1, 1, dilations[1]],
]
input_channel = int(input_channel * width_mult)
# build first layer
self.features = [ConvBNMTL(3, input_channel, strides[0])]
# build inverted residual blocks
for t, c, n, s, dil in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s,
expand_ratio=t,
dilation=dil,
tasks=self.tasks,
norm_per_task=self.norm_per_task_enc,
use_modulation=mod_enc))
else:
self.features.append(block(input_channel, output_channel, 1,
expand_ratio=t,
dilation=dil,
tasks=self.tasks,
norm_per_task=self.norm_per_task_enc,
use_modulation=mod_enc))
input_channel = output_channel
# make it nn.Sequential
self.features = SequentialMultiTask(*self.features)
self.features_low = self.features[:4]
self.features_high = self.features[4:]
self.decoder = ASPPDecoderMnet(n_classes=n_classes,
in_channels_high=320,
in_channels_low=24,
out_f_classifier=64,
atrous_rates=atrous_rates,
tasks=self.tasks,
norm_per_task=self.norm_per_task_dec,
squeeze=mod_dec)
if self.use_dscr:
self.dscr_k = dscr_k
self.dscr_d = dscr_d
self.task_label_shape = (128, 128)
print('Using Discriminator with kernel size: {} and depth: {}'.format(self.dscr_k, self.dscr_d))
self.discriminator = self._get_discriminator(width_decoder=64)
self.rev_layer = ReverseLayerF()
self.criterion_classifier = torch.nn.CrossEntropyLoss(ignore_index=255)
self._initialize_weights()
self._verify_bnorm_params()
def forward(self, x, task=None):
in_shape = x.shape[2:]
x_low = self.features_low(x, task)
x = self.features_high(x_low, task)
x, features = self.decoder(x_low, x, task)
x = F.interpolate(x, size=(in_shape[-2], in_shape[-1]), mode='bilinear', align_corners=False)
return x, features
def compute_losses(self, outputs, features, criteria, gt_elems, alpha, p):
"""
Computes losses for tasks, losses for discriminator, output of discriminator, and gradients
"""
# Compute classification losses and gradients wrt features
tasks = outputs.keys()
grads = {}
losses_tasks = {}
task_labels = {}
outputs_dscr = {}
losses_dscr = {}
with torch.enable_grad():
for task in tasks:
curr_loss = p.TASKS.LOSS_MULT[task] * criteria[task](outputs[task], gt_elems[task])
losses_tasks[task] = curr_loss
if self.use_dscr:
# Create task labels
task_labels[task] = self._create_task_labels(gt_elems, task).to(outputs[task].device)
# Compute Gradients
grads[task] = grad(curr_loss, features[task], create_graph=True)[0]
grads_norm = grads[task].norm(p=2, dim=1).unsqueeze(1) + 1e-10
input_dscr = grads[task] / grads_norm
input_dscr = self.rev_layer.apply(input_dscr, alpha)
outputs_dscr[task] = self.discriminator(input_dscr)
losses_dscr[task] = self.criterion_classifier(outputs_dscr[task], task_labels[task])
return losses_tasks, losses_dscr, outputs_dscr, grads, task_labels
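    # Illustrative usage (not part of the original code): the returned dictionaries are typically
    # reduced to a single scalar before the backward pass, e.g.
    #   losses_tasks, losses_dscr, _, _, _ = net.compute_losses(outputs, features, criteria,
    #                                                           gt_elems, alpha, p)
    #   loss = sum(losses_tasks.values()) + sum(losses_dscr.values())
    #   loss.backward()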
def _create_task_labels(self, gt_elems, task):
valid = gt_elems[task].detach().clone()
valid = F.interpolate(valid, size=self.task_label_shape, mode='nearest')
valid[valid != 255] = self.task_dict[task]
valid = valid[:, 0, :, :]
return valid.long()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def _verify_bnorm_params(self):
verify_trainable = True
a = 0
for x in self.modules():
if isinstance(x, nn.BatchNorm2d):
for y in x.parameters():
verify_trainable = (verify_trainable and y.requires_grad)
a += isinstance(x, nn.BatchNorm2d)
print("\nVerification: Trainable batchnorm parameters? Answer: {}\n".format(verify_trainable))
print("Asynchronous bnorm layers: {}".format(a))
def _get_discriminator(self, width_decoder):
discriminator = FullyConvDiscriminator(in_channels=width_decoder, n_classes=len(self.tasks),
kernel_size=self.dscr_k, depth=self.dscr_d)
return discriminator
def _define_if_copyable(self, module):
        is_copyable = isinstance(module, nn.Conv2d) \
                      or isinstance(module, nn.Linear) \
                      or isinstance(module, nn.BatchNorm2d)
return is_copyable
def _exists_task_in_name(self, layer_name):
for task in self.tasks:
if layer_name.find(task) > 0:
return task
return None
def load_pretrained(self, base_network):
copy_trg = {}
for (name_trg, module_trg) in self.named_modules():
if self._define_if_copyable(module_trg):
copy_trg[name_trg] = module_trg
copy_src = {}
for (name_src, module_src) in base_network.named_modules():
if self._define_if_copyable(module_src):
copy_src[name_src] = module_src
task_specific_counter = 0
mapping = {}
for name_trg in copy_trg:
# Handle first layers
if 'decoder' in name_trg:
continue
elif 'discriminator' in name_trg:
continue
elif name_trg in copy_src:
map_trg = name_trg
elif 'features.0' in name_trg:
map_trg = name_trg.replace('.conv', '')
elif '.conv1' in name_trg:
map_trg = name_trg.replace('.conv1', '.0')
elif '.bn1' in name_trg:
map_trg = name_trg.replace('.bn1', '.1')
elif '.conv2' in name_trg:
map_trg = name_trg.replace('.conv2', '.3')
elif '.bn2' in name_trg:
map_trg = name_trg.replace('.bn2', '.4')
elif '.conv3' in name_trg:
map_trg = name_trg.replace('.conv3', '.6')
elif '.bn3' in name_trg:
map_trg = name_trg.replace('.bn3', '.7')
elif self._exists_task_in_name(name_trg):
# Handle SE layers
task = self._exists_task_in_name(name_trg)
name_src = name_trg.replace('.' + task, '')
if name_src in copy_src:
map_trg = name_src
task_specific_counter += 1
else:
raise ValueError('Unknown module name found: {}'.format(name_trg))
# Handle BatchNom2d layers
task = self._exists_task_in_name(map_trg)
if task:
map_trg = map_trg.replace('.' + task, '')
mapping[name_trg] = map_trg
i = 0
for name in mapping:
module_trg = copy_trg[name]
module_src = copy_src[mapping[name]]
if module_trg.weight.data.shape != module_src.weight.data.shape:
print('skipping layer with size: {} and target size: {}'
.format(module_trg.weight.data.shape, module_src.weight.data.shape))
continue
if isinstance(module_trg, nn.Conv2d) and isinstance(module_src, nn.Conv2d):
module_trg.weight.data = module_src.weight.data.clone()
                if module_src.bias is not None:
                    module_trg.bias.data = module_src.bias.data.clone()
i += 1
elif isinstance(module_trg, nn.BatchNorm2d) and isinstance(module_src, nn.BatchNorm2d):
# copy running mean and variance of batchnorm layers!
module_trg.running_mean.data = module_src.running_mean.data.clone()
module_trg.running_var.data = module_src.running_var.data.clone()
module_trg.weight.data = module_src.weight.data.clone()
module_trg.bias.data = module_src.bias.data.clone()
i += 1
elif isinstance(module_trg, nn.Linear) and (isinstance(module_src, nn.Linear)):
module_trg.weight.data = module_src.weight.data.clone()
module_trg.bias.data = module_src.bias.data.clone()
i += 1
print('\nContents of {} out of {} layers successfully copied, including {} task-specific layers\n'
.format(i, len(mapping), task_specific_counter))
def get_lr_params(model, part='all', tasks=None):
"""
    This generator yields the trainable parameters of the selected part of the model
    ('all', 'backbone', 'decoder', 'generic', 'task_specific' or 'discriminator')
"""
def ismember(layer_txt, tasks):
exists = False
for task in tasks:
exists = exists or layer_txt.find(task) > 0
return exists
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
if part == 'all':
b = [model]
elif part == 'backbone':
b = [model.features]
elif part == 'decoder':
b = [model.decoder]
elif part == 'generic':
b = [model]
elif part == 'task_specific':
b = [model]
elif part == 'discriminator':
b = [model.discriminator]
for i in range(len(b)):
for name, k in b[i].named_parameters():
if k.requires_grad:
if part == 'generic' or part == 'decoder' or part == 'backbone':
if ismember(name, tasks):
continue
elif part == 'task_specific':
if not ismember(name, tasks):
continue
yield k
def se_mobilenet_v2(pretrained='scratch', **kwargs):
model = SEMobileNetV2(**kwargs)
if pretrained == 'imagenet':
print('loading pre-trained imagenet model')
model_full = se_mobilenet_v2_imagenet.se_mobilenet_v2(pretrained=True)
model.load_pretrained(model_full)
elif pretrained == 'scratch':
print('using imagenet initialized from scratch')
else:
raise NotImplementedError('select either imagenet or scratch for pre-training')
return model
def main():
import fblib.util.pdf_visualizer as viz
elems = ['image', 'semseg', 'edge']
tasks = ['semseg', 'edge']
net = se_mobilenet_v2(pretrained='imagenet',
n_classes={'edge': 1, 'semseg': 21},
tasks=tasks,
mod_enc=True,
mod_dec=True,
train_norm_layers=True)
net.cuda()
net.eval()
img = torch.rand(2, 3, 512, 512)
img = img.cuda()
y = {}
for task in tasks:
        y[task], _ = net.forward(img, task=task)
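        # Report the prediction shape for each task
        print('Task: {}, output shape: {}'.format(task, y[task].shape))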
g = viz.make_dot(y, net.state_dict())
g.view(directory='./')
if __name__ == '__main__':
main()
| astmt-master | fblib/networks/deeplab_multi_task/deeplab_se_mobilenet_v2_multitask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from copy import deepcopy
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn import functional as F
from torch.autograd import grad
import fblib.networks.classification.se_resnet as se_resnet
from fblib.networks.deeplab_multi_task.decoders import ASPPv3Plus, UbernetDecoder
from fblib.networks.deeplab_multi_task.discriminators import FullyConvDiscriminator
from fblib.layers.reverse_grad import ReverseLayerF
from fblib.layers.squeeze import SELayerMultiTaskDict
from fblib.util.custom_container import SequentialMultiTask
try:
from itertools import izip
except ImportError: # python3.x
izip = zip
affine_par = True
class ConvCoupledBatchNormMT(nn.Module):
def __init__(self,
tasks=None,
process_layer=None,
norm=nn.BatchNorm2d,
norm_kwargs=None,
train_norm=False):
super(ConvCoupledBatchNormMT, self).__init__()
self.tasks = tasks
# Processing layer
self.process = process_layer
# Batch Norm layer(s)
if tasks is not None:
print('Using per-task batchnorm parameters in Encoder: Downsampling')
self.norm = nn.ModuleDict({task: norm(**norm_kwargs) for task in self.tasks})
else:
self.norm = norm(**norm_kwargs)
# Define whether batchnorm parameters are trainable
for i in self.norm.parameters():
i.requires_grad = train_norm
def forward(self, x, task=None):
x = self.process(x)
if self.tasks is None:
x = self.norm(x)
else:
x = self.norm[task](x)
return x
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, train_norm_layers=False,
reduction=16, tasks=None, squeeze_enc=True, adapters=False):
super(SEBottleneck, self).__init__()
self.adapters = adapters
self.per_task_norm_layers = train_norm_layers and tasks
padding = dilation
self.bnorm = nn.BatchNorm2d
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)  # stride on the 1x1 conv
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,  # 3x3 conv keeps stride 1 and carries the dilation
padding=padding, bias=False, dilation=dilation)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
if self.adapters:
print('Using parallel adapters in Encoder')
self.adapt = nn.ModuleDict({task: nn.Conv2d(planes, planes, kernel_size=1, bias=False) for task in tasks})
if self.per_task_norm_layers:
print('Using per-task batchnorm parameters in Encoder')
self.bn1 = nn.ModuleDict({task: self.bnorm(planes, affine=affine_par) for task in tasks})
self.bn2 = nn.ModuleDict({task: self.bnorm(planes, affine=affine_par) for task in tasks})
self.bn3 = nn.ModuleDict({task: self.bnorm(planes * 4, affine=affine_par) for task in tasks})
else:
self.bn1 = self.bnorm(planes, affine=affine_par)
self.bn2 = self.bnorm(planes, affine=affine_par)
self.bn3 = self.bnorm(planes * 4, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = train_norm_layers
for i in self.bn2.parameters():
i.requires_grad = train_norm_layers
for i in self.bn3.parameters():
i.requires_grad = train_norm_layers
self.relu = nn.ReLU(inplace=True)
if squeeze_enc:
self.se = SELayerMultiTaskDict(channel=planes * 4, reduction=reduction, tasks=tasks)
else:
self.se = SELayerMultiTaskDict(channel=planes * 4, reduction=reduction, tasks=None)
self.downsample = downsample
self.stride = stride
def forward(self, x, task=None):
residual = x
out = self.conv1(x)
if self.per_task_norm_layers:
out = self.bn1[task](out)
else:
out = self.bn1(out)
out = self.relu(out)
if self.adapters:
out = self.adapt[task](out) + self.conv2(out)
else:
out = self.conv2(out)
if self.per_task_norm_layers:
out = self.bn2[task](out)
else:
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if self.per_task_norm_layers:
out = self.bn3[task](out)
else:
out = self.bn3(out)
out = self.se(out, task)
if self.downsample is not None:
residual = self.downsample(x, task)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, n_classes, classifier='atrous-v3', output_stride=16, tasks=None,
train_norm_layers=False, width_decoder=256, squeeze_enc=True, squeeze_dec=False, adapters=False,
norm_per_task=False, dscr_type='fconv', dscr_d=2, dscr_k=1):
super(ResNet, self).__init__()
print("Constructing ResNet model...")
print("Output stride: {}".format(output_stride))
print("Number of classes: {}".format(n_classes))
v3_atrous_rates = [6, 12, 18]
if output_stride == 8:
dilations = (2, 4)
strides = (2, 2, 2, 1, 1)
v3_atrous_rates = [x * 2 for x in v3_atrous_rates]
elif output_stride == 16:
dilations = (1, 2)
strides = (2, 2, 2, 2, 1)
else:
raise ValueError('Choose between output_stride 8 and 16')
self.inplanes = 64
self.classifier = classifier
self.train_norm_layers = train_norm_layers
self.bnorm = nn.BatchNorm2d
self.squeeze_enc = squeeze_enc
self.adapters = adapters
self.tasks = tasks
self.task_dict = {x: i for i, x in enumerate(self.tasks)}
self.per_task_norm_layers = self.train_norm_layers and self.tasks
self.use_dscr = True if dscr_type is not None else False
# Network structure
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=strides[0], padding=3, bias=False)
self.bn1 = self.bnorm(64, affine=affine_par)
for i in self.bn1.parameters():
i.requires_grad = self.train_norm_layers
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=strides[1], padding=1, ceil_mode=False)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[2])
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[3], dilation=dilations[0])
self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[4], dilation=dilations[1])
width_low = 48 * width_decoder / 256 # Adapt in case of thinner classifiers
assert (int(width_low) == width_low)
print('Using decoder')
if classifier == 'atrous-v3' or classifier == 'conv':
print('Initializing classifier: A-trous with global features (Deeplab-v3+)')
self.decoder = ASPPv3Plus(tasks=self.tasks,
n_classes=n_classes,
classifier=classifier,
in_channels_low=256,
in_channels_high=2048,
out_f_classifier=width_decoder,
atrous_rates=v3_atrous_rates,
norm=self.bnorm,
norm_per_task=norm_per_task,
squeeze=squeeze_dec,
adapters=self.adapters,
)
elif classifier == 'uber':
print('Initializing Ubernet classifier')
self.decoder = UbernetDecoder(tasks=self.tasks,
n_classes=n_classes,
in_channels_low=256,
in_channels_high=2048,
norm=self.bnorm)
else:
raise NotImplementedError('Choose one of the available classifiers')
if self.use_dscr:
print('Using Discriminator')
self.dscr_d = dscr_d
self.dscr_k = dscr_k
self.task_label_shape = (128, 128)
self.discriminator = self._get_discriminator(width_decoder)
self.rev_layer = ReverseLayerF()
self.criterion_classifier = torch.nn.CrossEntropyLoss(ignore_index=255)
self._initialize_weights()
# Check if batchnorm parameters are trainable
self._verify_bnorm_params()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:
downsample = ConvCoupledBatchNormMT(tasks=self.tasks if self.per_task_norm_layers else None,
process_layer=nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
norm=self.bnorm,
norm_kwargs={'num_features': planes * block.expansion,
'affine': affine_par},
train_norm=self.train_norm_layers)
layers = [block(self.inplanes, planes, stride=stride, dilation=dilation, downsample=downsample,
train_norm_layers=self.train_norm_layers, tasks=self.tasks,
squeeze_enc=self.squeeze_enc, adapters=self.adapters)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation,
train_norm_layers=self.train_norm_layers, tasks=self.tasks,
squeeze_enc=self.squeeze_enc, adapters=self.adapters))
return SequentialMultiTask(*layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, self.bnorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)
m.bias.data.zero_()
# zero-initialization for residual adapters
for name, m in self.named_modules():
if name.find('adapt') >= 0 and isinstance(m, nn.Conv2d):
m.weight.data.fill_(0)
def _verify_bnorm_params(self):
verify_trainable = True
a = 0
for x in self.modules():
if isinstance(x, nn.BatchNorm2d):
for y in x.parameters():
verify_trainable = (verify_trainable and y.requires_grad)
a += isinstance(x, nn.BatchNorm2d)
print("\nVerification: Trainable batchnorm parameters? Answer: {}\n".format(verify_trainable))
print("Asynchronous bnorm layers: {}".format(a))
def forward(self, x_in, task=None):
x = x_in
in_shape = x.shape[2:]
# First Layers
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
# Stage #1 and low-level features
x = self.layer1(x, task)
x_low = x
# Stages #2 - #4
x = self.layer2(x, task=task)
x = self.layer3(x, task=task)
x = self.layer4(x, task=task)
# Decoder
x, features = self.decoder(x_low, x, task=task)
out = F.interpolate(x, size=(in_shape[-2], in_shape[-1]), mode='bilinear', align_corners=False)
return out, features
def compute_losses(self, outputs, features, criteria, gt_elems, alpha, p):
"""
Computes losses for tasks, losses for discriminator, output of discriminator, and gradients
"""
# Compute classification losses and gradients wrt features
tasks = outputs.keys()
grads = {}
losses_tasks = {}
task_labels = {}
outputs_dscr = {}
losses_dscr = {}
with torch.enable_grad():
for task in tasks:
curr_loss = p.TASKS.LOSS_MULT[task] * criteria[task](outputs[task], gt_elems[task])
losses_tasks[task] = curr_loss
if self.use_dscr:
# Create task labels
task_labels[task] = self._create_task_labels(gt_elems, task).to(outputs[task].device)
# Compute Gradients
grads[task] = grad(curr_loss, features[task], create_graph=True)[0]
grads_norm = grads[task].norm(p=2, dim=1).unsqueeze(1) + 1e-10
input_dscr = grads[task] / grads_norm
input_dscr = self.rev_layer.apply(input_dscr, alpha)
outputs_dscr[task] = self.discriminator(input_dscr)
losses_dscr[task] = self.criterion_classifier(outputs_dscr[task], task_labels[task])
return losses_tasks, losses_dscr, outputs_dscr, grads, task_labels
def _get_discriminator(self, width_decoder):
discriminator = FullyConvDiscriminator(in_channels=width_decoder, n_classes=len(self.tasks),
kernel_size=self.dscr_k, depth=self.dscr_d)
return discriminator
def _create_task_labels(self, gt_elems, task):
valid = deepcopy(gt_elems[task].detach())
valid = F.interpolate(valid, size=self.task_label_shape, mode='nearest')
valid[valid != 255] = self.task_dict[task]
valid = valid[:, 0, :, :]
return valid.long()
def _define_if_copyable(self, module):
is_copyable = isinstance(module, nn.Conv2d) \
or isinstance(module, nn.Linear) \
or isinstance(module, nn.BatchNorm2d) or \
isinstance(module, self.bnorm)
return is_copyable
def _exists_task_in_name(self, layer_name):
for task in self.tasks:
if layer_name.find(task) > 0:
return task
return None
def load_pretrained(self, base_network):
copy_trg = {}
for (name_trg, module_trg) in self.named_modules():
if self._define_if_copyable(module_trg):
copy_trg[name_trg] = module_trg
copy_src = {}
for (name_src, module_src) in base_network.named_modules():
if self._define_if_copyable(module_src):
copy_src[name_src] = module_src
task_specific_counter = 0
mapping = {}
for name_trg in copy_trg:
if name_trg in copy_src:
mapping[name_trg] = name_trg
# Copy ImageNet SE layers to each task-specific layer
elif self.tasks is not None:
task = self._exists_task_in_name(name_trg)
if task:
name_src = name_trg.replace('.' + task, '')
if name_src in copy_src:
mapping[name_trg] = name_src
task_specific_counter += 1
# Handle downsampling layers
for name_trg in copy_trg:
name_src = None
if name_trg.find('downsample') > 0:
if name_trg.find('process') > 0:
name_src = name_trg.replace('process', '0')
                elif name_trg.find('norm') > 0:
if self.per_task_norm_layers is not None:
task = self._exists_task_in_name(name_trg)
if task:
name_src = name_trg.replace('norm.' + task, '1')
else:
name_src = name_trg.replace('norm', '1')
if name_src in copy_src:
mapping[name_trg] = name_src
i = 0
for name in mapping:
module_trg = copy_trg[name]
module_src = copy_src[mapping[name]]
if module_trg.weight.data.shape != module_src.weight.data.shape:
print('Skipping layer with size: {} and target size: {}'
.format(module_trg.weight.data.shape, module_src.weight.data.shape))
continue
if isinstance(module_trg, nn.Conv2d) and isinstance(module_src, nn.Conv2d):
module_trg.weight.data = deepcopy(module_src.weight.data)
module_trg.bias = deepcopy(module_src.bias)
i += 1
elif isinstance(module_trg, self.bnorm) and (isinstance(module_src, nn.BatchNorm2d)
or isinstance(module_src, self.bnorm)):
# Copy running mean and variance of batchnorm layers!
module_trg.running_mean.data = deepcopy(module_src.running_mean.data)
module_trg.running_var.data = deepcopy(module_src.running_var.data)
module_trg.weight.data = deepcopy(module_src.weight.data)
module_trg.bias.data = deepcopy(module_src.bias.data)
i += 1
elif isinstance(module_trg, nn.Linear) and (isinstance(module_src, nn.Linear)):
module_trg.weight.data = deepcopy(module_src.weight.data)
module_trg.bias.data = deepcopy(module_src.bias.data)
i += 1
print('\nContent of {} out of {} layers successfully copied, including {} task-specific layers\n'
.format(i, len(mapping), task_specific_counter))
def get_lr_params(model, part='all', tasks=None):
"""
    This generator yields the trainable parameters of the selected part of the model
    ('all', 'backbone', 'decoder', 'generic', 'task_specific' or 'discriminator')
"""
def ismember(layer_txt, tasks):
exists = False
for task in tasks:
exists = exists or layer_txt.find(task) > 0
return exists
# for multi-GPU training
if hasattr(model, 'module'):
model = model.module
if part == 'all':
b = [model]
elif part == 'backbone':
b = [model.conv1, model.bn1, model.layer1, model.layer2, model.layer3, model.layer4]
elif part == 'decoder':
b = [model.decoder]
elif part == 'generic':
b = [model]
elif part == 'task_specific':
b = [model]
elif part == 'discriminator':
b = [model.discriminator]
for i in range(len(b)):
for name, k in b[i].named_parameters():
if k.requires_grad:
if part == 'generic' or part == 'decoder' or part == 'backbone':
if ismember(name, tasks):
continue
elif part == 'task_specific':
if not ismember(name, tasks):
continue
yield k
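# Illustrative usage (not part of the original code): build one optimizer group per part, e.g.
#   optimizer = torch.optim.SGD([
#       {'params': get_lr_params(model, part='generic', tasks=tasks), 'lr': lr},
#       {'params': get_lr_params(model, part='task_specific', tasks=tasks), 'lr': lr},
#       {'params': get_lr_params(model, part='discriminator', tasks=tasks), 'lr': lr_dscr}],
#       momentum=0.9, weight_decay=1e-4)
# where lr and lr_dscr are placeholder learning rates.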
def se_resnet26(n_classes, pretrained='scratch', **kwargs):
"""Constructs a SE-ResNet-18 model.
Args:
pretrained (str): If True, returns a model pre-trained on ImageNet
"""
print('Constructing ResNet18')
model = ResNet(SEBottleneck, [2, 2, 2, 2], n_classes, **kwargs)
if pretrained == 'imagenet':
print('Loading pre-trained ImageNet model')
model_full = se_resnet.se_resnet26(pretrained=True)
model.load_pretrained(model_full)
elif pretrained == 'scratch':
print('Training from scratch!')
else:
raise NotImplementedError('Select between scratch and imagenet for pre-training')
return model
def se_resnet50(n_classes, pretrained='scratch', **kwargs):
"""Constructs a SE-ResNet-50 model.
Args:
        pretrained (str): 'imagenet' loads a model pre-trained on ImageNet, 'scratch' trains from scratch
    """
    print('Constructing SE-ResNet-50')
model = ResNet(SEBottleneck, [3, 4, 6, 3], n_classes, **kwargs)
if pretrained == 'imagenet':
print('Loading pre-trained ImageNet model')
model_full = se_resnet.se_resnet50(pretrained=True)
model.load_pretrained(model_full)
elif pretrained == 'scratch':
print('Training from scratch!')
else:
raise NotImplementedError('Select between scratch and imagenet for pre-training')
return model
def se_resnet101(n_classes, pretrained='scratch', **kwargs):
"""Constructs a SE-ResNet-101 model.
Args:
        pretrained (str): 'imagenet' loads a model pre-trained on ImageNet, 'scratch' trains from scratch
    """
    print('Constructing SE-ResNet-101')
model = ResNet(SEBottleneck, [3, 4, 23, 3], n_classes, **kwargs)
if pretrained == 'imagenet':
print('Loading pre-trained ImageNet model')
model_full = se_resnet.se_resnet101(pretrained=True)
model.load_pretrained(model_full)
elif pretrained == 'scratch':
print('Training from scratch!')
else:
raise NotImplementedError('Select between scratch and imagenet for pre-training')
return model
def test_vis_net(net, elems):
import cv2
from torchvision import transforms
from fblib.dataloaders import custom_transforms as tr
from fblib.dataloaders.pascal_context import PASCALContext
from torch.utils.data import DataLoader
import fblib.util.visualizepy as viz
tasks = elems[1:]
net.cuda()
# Define the transformations
transform = transforms.Compose(
[tr.FixedResize(resolutions={x: (512, 512) for x in elems},
flagvals={x: cv2.INTER_NEAREST for x in elems}),
tr.ToTensor()])
# Define dataset, tasks, and the dataloader
dataset = PASCALContext(split=['train'],
transform=transform,
do_edge=True,
do_human_parts=True,
do_semseg=True)
    dataloader = DataLoader(dataset, batch_size=2, shuffle=False, num_workers=0)
net.eval()
sample = next(iter(dataloader))
img = sample['image']
task_gts = list(sample.keys())
img = img.cuda()
y = {}
for task in task_gts:
if task in tasks:
y[task], _ = net.forward(img, task=task)
g = viz.make_dot(y, net.state_dict())
g.view(directory='./')
def test_gflops(net, elems):
from fblib.util.model_resources.flops import compute_gflops
batch_size = 2
gflops_1_task = compute_gflops(net, in_shape=(batch_size, 3, 256, 256), tasks=elems[1])
print('GFLOPS for 1 task: {}'.format(gflops_1_task / batch_size))
def test_lr_params(net, tasks):
params = get_lr_params(net, part='generic', tasks=tasks)
for p in params:
print(p)
def main():
elems = ['image', 'edge', 'semseg']
tasks = elems[1:]
squeeze_enc = False
squeeze_dec = False
adapters = False
width_decoder = 256
norm_per_task = False
# Load Network
net = se_resnet26(n_classes={'edge': 1, 'semseg': 21},
pretrained='imagenet',
classifier='atrous-v3',
output_stride=8,
tasks=tasks,
width_decoder=width_decoder,
squeeze_enc=squeeze_enc,
squeeze_dec=squeeze_dec,
adapters=adapters,
norm_per_task=norm_per_task,
train_norm_layers=True,
dscr_type=None)
test_vis_net(net, elems)
test_gflops(net, elems)
test_lr_params(net, tasks)
if __name__ == '__main__':
main()
| astmt-master | fblib/networks/deeplab_multi_task/deeplab_se_resnet_multitask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from fblib.networks.deeplab_multi_task.classifiers_multitask import AtrousSpatialPyramidPoolingModule
from fblib.layers.squeeze import ConvCoupledSE
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
class UbernetDecoder(nn.Module):
"""
Simple Shallow decoder (like Ubernet)
"""
def __init__(self,
tasks,
in_channels_low,
in_channels_high,
n_classes,
norm=nn.BatchNorm2d,
ret_features=True):
super(UbernetDecoder, self).__init__()
self.tasks = tasks
self.ret_features = ret_features
self.high_level = nn.ModuleDict()
self.low_level = nn.ModuleDict()
self.predict = nn.ModuleDict()
for task in tasks:
self.high_level[task] = nn.Sequential(nn.Conv2d(in_channels=in_channels_high,
out_channels=n_classes[task],
kernel_size=1,
bias=False),
norm(n_classes[task]),
nn.ReLU(inplace=True))
self.low_level[task] = nn.Sequential(nn.Conv2d(in_channels=in_channels_low,
out_channels=n_classes[task],
kernel_size=1,
bias=False),
norm(n_classes[task]),
nn.ReLU(inplace=True))
self.predict[task] = nn.Conv2d(in_channels=2 * n_classes[task],
out_channels=n_classes[task],
kernel_size=1,
bias=True)
def forward(self, x_low, x_high, task=None):
# Reduce dimensionality of low-level features
x_low = self.low_level[task](x_low)
# Reduce dimensionality of high-level features and upscale
x_high = self.high_level[task](x_high)
x_high = F.interpolate(x_high, size=(x_low.shape[2], x_low.shape[3]), mode='bilinear', align_corners=False)
# Concatenate features
x = torch.cat((x_low, x_high), dim=1)
features = x
# Make final prediction
x = self.predict[task](x)
if self.ret_features:
return x, features
else:
return x
def test_ubernet():
print('Testing UberNet-like decoder')
tasks = ['edge', 'semseg', 'human_parts']
out_channels = {'edge': 1, 'semseg': 21, 'human_parts': 7}
in_channels_low = 256
in_channels_high = 2048
x_low = torch.rand(2, in_channels_low, 128, 128)
x_high = torch.rand(2, in_channels_high, 64, 64)
net = UbernetDecoder(tasks=tasks,
in_channels_low=in_channels_low,
in_channels_high=in_channels_high,
n_classes=out_channels)
x_low, x_high, net = x_low.cuda(), x_high.cuda(), net.cuda()
for task in tasks:
out, _ = net(x_low, x_high, task=task)
print('Task: {}, Output Shape: {}'.format(task, out.shape))
class ASPPv3Plus(nn.Module):
"""
ASPP-v3 decoder
"""
def __init__(self,
tasks,
n_classes,
classifier='atrous-v3',
in_channels_low=256,
in_channels_high=2048,
out_f_classifier=64,
atrous_rates=None,
norm=nn.BatchNorm2d,
norm_per_task=True,
squeeze=False,
adapters=False,
):
super(ASPPv3Plus, self).__init__()
print('Initializing ASPP v3 Decoder for multiple tasks')
if atrous_rates is None:
atrous_rates = [6, 12, 18]
out_f_low = 48 * out_f_classifier / 256 # Adapt in case of thinner classifiers
assert (int(out_f_low) == out_f_low)
out_f_low = int(out_f_low)
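# With the default out_f_classifier=256 this yields the standard 48 low-level channels
# of DeepLab-v3+; thinner decoders scale the low-level width proportionally.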
kwargs_low = {"num_features": int(out_f_low), "affine": True}
kwargs_out = {"num_features": out_f_classifier, "affine": True}
self.tasks = tasks
if classifier == 'atrous-v3':
print('Initializing classifier: ASPP with global features (Deeplab-v3+)')
self.layer5 = AtrousSpatialPyramidPoolingModule(in_f=in_channels_high,
depth=out_f_classifier,
dilation_series=atrous_rates,
tasks=self.tasks,
norm_per_task=norm_per_task,
squeeze=squeeze,
adapters=adapters)
elif classifier == 'conv':
self.layer5 = ConvCoupledSE(tasks=tasks,
process_layers=nn.Conv2d(in_channels_high, out_f_classifier, kernel_size=1,
bias=False),
norm=norm,
norm_kwargs=kwargs_low,
norm_per_task=norm_per_task,
squeeze=squeeze,
adapters=adapters,
reduction=4)
else:
raise NotImplementedError('Choose one of the available classifiers')
self.low_level_reduce = ConvCoupledSE(tasks=tasks,
process_layers=nn.Conv2d(in_channels_low, int(out_f_low), kernel_size=1,
bias=False),
norm=norm,
norm_kwargs=kwargs_low,
norm_per_task=norm_per_task,
squeeze=squeeze,
adapters=adapters,
reduction=4)
self.conv_concat = ConvCoupledSE(tasks=tasks,
process_layers=conv3x3(out_f_classifier + int(out_f_low),
out_f_classifier),
norm=norm,
norm_kwargs=kwargs_out,
norm_per_task=norm_per_task,
squeeze=squeeze,
adapters=adapters)
self.conv_process = ConvCoupledSE(tasks=tasks,
process_layers=conv3x3(out_f_classifier, out_f_classifier),
norm=norm,
norm_kwargs=kwargs_out,
norm_per_task=norm_per_task,
squeeze=squeeze,
adapters=adapters)
self.conv_predict = nn.ModuleDict(
{task: nn.Conv2d(out_f_classifier, n_classes[task], kernel_size=1, bias=True) for task in tasks})
def forward(self, x_low, x, task=None):
x_low = self.low_level_reduce(x_low, task)
x = self.layer5(x, task)
x = F.interpolate(x, size=(x_low.shape[2], x_low.shape[3]), mode='bilinear', align_corners=False)
x = torch.cat((x, x_low), dim=1)
x = self.conv_concat(x, task)
x = self.conv_process(x, task)
features = x
x = self.conv_predict[task](x)
return x, features
def test_aspp():
print('Testing ASPP-v3 decoder')
import fblib.util.visualizepy as viz
tasks = ['edge']
n_classes = {'edge': 1}
in_channels_low = 256
in_channels_high = 2048
out_f_classifier = 64
x_low = torch.rand(2, in_channels_low, 128, 128).requires_grad_()
x_high = torch.rand(2, in_channels_high, 64, 64).requires_grad_()
net = ASPPv3Plus(tasks=tasks,
n_classes=n_classes,
classifier='atrous-v3',
in_channels_high=in_channels_high,
in_channels_low=in_channels_low,
out_f_classifier=out_f_classifier,
norm=nn.BatchNorm2d,
squeeze=True)
x_low, x_high, net = x_low.cuda(), x_high.cuda(), net.cuda()
out = {}
for task in tasks:
out[task], _ = net(x_low, x_high, task=task)
print('Task: {}, Output Shape: {}'.format(task, out[task].shape))
g = viz.make_dot(out, net.state_dict())
g.view(directory='./')
def main():
test_ubernet()
test_aspp()
if __name__ == '__main__':
main()
| astmt-master | fblib/networks/deeplab_multi_task/decoders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self, in_channels, n_classes):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(in_channels, 128),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(128, n_classes),
)
def forward(self, x):
x = self.model(x)
return x
class FullyConvDiscriminator(nn.Module):
def __init__(self, in_channels, n_classes, kernel_size=1, depth=1):
super(FullyConvDiscriminator, self).__init__()
padding = (kernel_size - 1) / 2
assert(padding == int(padding))
padding = int(padding)
print('\nInitializing Fully Convolutional Discriminator with depth: {} and kernel size: {}'
.format(depth, kernel_size))
if depth == 1:
self.model = nn.Sequential(
nn.Conv2d(in_channels, n_classes, kernel_size=kernel_size, padding=padding, bias=True))
elif depth == 2:
self.model = nn.Sequential(
nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, padding=padding, bias=False),
# nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, n_classes, kernel_size=kernel_size, padding=padding, bias=True))
def forward(self, x):
x = self.model(x)
return x
class AvePoolDiscriminator(nn.Module):
def __init__(self, in_channels, n_classes):
super(AvePoolDiscriminator, self).__init__()
self.avepool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self.model = nn.Sequential(
nn.Linear(in_features=in_channels, out_features=in_channels),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(in_features=in_channels, out_features=n_classes),
)
def forward(self, x):
x = self.avepool(x)
x = x.view(x.size(0), -1)
x = self.model(x)
return x
class ConvDiscriminator(nn.Module):
def __init__(self, in_channels, n_classes):
super(ConvDiscriminator, self).__init__()
self.model = nn.Sequential(
nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False, stride=4),
nn.BatchNorm2d(in_channels),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False, stride=4),
nn.BatchNorm2d(in_channels),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False, stride=4),
nn.BatchNorm2d(in_channels),
nn.LeakyReLU(0.2, inplace=True),
nn.AdaptiveAvgPool2d(output_size=(1, 1))
)
self.classifier = nn.Linear(in_features=in_channels, out_features=n_classes)
def forward(self, x):
x = self.model(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
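# Editor's note: minimal shape-check sketch (illustrative, not part of the original module):
#   import torch
#   x = torch.rand(2, 64, 32, 32)
#   print(FullyConvDiscriminator(64, 3)(x).shape)   # torch.Size([2, 3, 32, 32])
#   print(ConvDiscriminator(64, 3)(x).shape)        # torch.Size([2, 3])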
| astmt-master | fblib/networks/deeplab_multi_task/discriminators.py |
| astmt-master | fblib/networks/deeplab_multi_task/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import init
from fblib.layers.squeeze import ConvCoupledSE
affine_par = True
class AtrousSpatialPyramidPoolingModule(nn.Module):
"""
Atrous Spatial Pyramid Pooling Module (DeepLab-v3+)
"""
def __init__(self, dilation_series=None, depth=256, in_f=2048, cardinality=1, exist_decoder=True,
tasks=None, squeeze=False, adapters=False, se_after_relu=True, norm_per_task=True):
super(AtrousSpatialPyramidPoolingModule, self).__init__()
if dilation_series is None:
dilation_series = [6, 12, 18]
padding_series = dilation_series
self.bnorm = nn.BatchNorm2d
self.squeeze = squeeze
kwargs = {"num_features": depth, "affine": affine_par}
self.conv2d_list = nn.ModuleList()
# 1x1 convolution
self.conv2d_list.append(
ConvCoupledSE(tasks=tasks,
process_layers=nn.Conv2d(in_f, depth, kernel_size=1, stride=1, bias=False),
norm=self.bnorm,
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze,
adapters=adapters,
se_after_relu=se_after_relu))
for dilation, padding in zip(dilation_series, padding_series):
self.conv2d_list.append(
ConvCoupledSE(tasks=tasks,
process_layers=nn.Conv2d(in_f, depth, kernel_size=3, stride=1, padding=padding,
dilation=dilation, bias=False, groups=cardinality),
norm=self.bnorm,
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze,
adapters=adapters,
se_after_relu=se_after_relu))
# Global features
self.conv2d_list.append(
ConvCoupledSE(tasks=tasks,
process_layers=[nn.AdaptiveAvgPool2d(output_size=(1, 1)),
nn.Conv2d(in_f, depth, kernel_size=1, stride=1, bias=False)],
norm=self.bnorm,
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze,
se_after_relu=se_after_relu))
if exist_decoder:
self.conv2d_final = ConvCoupledSE(tasks=tasks,
process_layers=nn.Conv2d(depth * 5, depth, kernel_size=1,
stride=1, bias=False),
norm=self.bnorm,
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze,
adapters=adapters,
se_after_relu=se_after_relu)
else:
self.conv2d_final = nn.Sequential(nn.Conv2d(depth * 5, depth, kernel_size=1, stride=1, bias=True))
def forward(self, x, task=None):
h, w = x.size(2), x.size(3)
interm = []
for i in range(len(self.conv2d_list)):
interm.append(self.conv2d_list[i](x, task))
# Upsample the global features
interm[-1] = F.interpolate(input=interm[-1], size=(h, w), mode='bilinear', align_corners=False)
# Concatenate the parallel streams
out = torch.cat(interm, dim=1)
# Final convolutional layer of the classifier
out = self.conv2d_final(out, task)
return out
class ConvClassifier(nn.Module):
"""
A simple convolutional classifier
"""
def __init__(self, depth=256, in_f=2048, cardinality=1, exist_decoder=True,
tasks=None, squeeze=False, se_after_relu=True, norm_per_task=False):
super(ConvClassifier, self).__init__()
self.bnorm = nn.BatchNorm2d
self.squeeze = squeeze
kwargs = {"num_features": depth, "affine": affine_par}
self.conv2d = ConvCoupledSE(tasks=tasks,
process_layers=nn.Conv2d(in_f, depth, kernel_size=3, stride=1,
padding=1, dilation=1,
bias=False, groups=cardinality),
norm=self.bnorm,
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze,
se_after_relu=se_after_relu)
if exist_decoder:
self.conv2d_final = ConvCoupledSE(tasks=tasks,
process_layers=nn.Conv2d(depth, depth, kernel_size=3, stride=1,
padding=1, dilation=1, bias=False),
norm=self.bnorm,
norm_kwargs=kwargs,
norm_per_task=norm_per_task,
squeeze=self.squeeze,
se_after_relu=se_after_relu)
else:
self.conv2d_final = nn.Sequential(nn.Conv2d(depth, depth, kernel_size=1, stride=1, bias=True))  # input has `depth` channels here, unlike the 5-stream ASPP module
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, self.bnorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def forward(self, x, task=None):
x = self.conv2d(x, task)
# Final convolutional layer of the classifier
x = self.conv2d_final(x, task)
return x
| astmt-master | fblib/networks/deeplab_multi_task/classifiers_multitask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
def jaccard(gt, pred, void_pixels=None):
assert(gt.shape == pred.shape)
if void_pixels is None:
void_pixels = np.zeros_like(gt)
assert(void_pixels.shape == gt.shape)
gt = gt.astype(bool)
pred = pred.astype(bool)
void_pixels = void_pixels.astype(bool)
if np.isclose(np.sum(gt & np.logical_not(void_pixels)), 0) and np.isclose(np.sum(pred & np.logical_not(void_pixels)), 0):
return 1
else:
return np.sum(((gt & pred) & np.logical_not(void_pixels))) / \
np.sum(((gt | pred) & np.logical_not(void_pixels)), dtype=np.float32)
def precision_recall(gt, pred, void_pixels=None):
if void_pixels is None:
void_pixels = np.zeros_like(gt)
gt = gt.astype(bool)
pred = pred.astype(bool)
void_pixels = void_pixels.astype(bool)
tp = ((pred & gt) & ~void_pixels).sum()
fn = ((~pred & gt) & ~void_pixels).sum()
fp = ((pred & ~gt) & ~void_pixels).sum()
prec = tp / (tp + fp + 1e-12)
rec = tp / (tp + fn + 1e-12)
return prec, rec
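# Editor's note: minimal usage sketch (illustrative, not part of the original module).
# It assumes boolean masks of identical shape, as expected by the functions above.
if __name__ == '__main__':
    demo_gt = np.zeros((4, 4), dtype=bool)
    demo_gt[1:3, 1:3] = True      # 2x2 ground-truth square
    demo_pred = np.zeros((4, 4), dtype=bool)
    demo_pred[1:3, 1:4] = True    # 2x3 prediction overlapping the square
    print('Jaccard: {:.3f}'.format(jaccard(demo_gt, demo_pred)))            # 4/6
    demo_prec, demo_rec = precision_recall(demo_gt, demo_pred)
    print('Precision: {:.3f}, Recall: {:.3f}'.format(demo_prec, demo_rec))  # 4/6, 4/4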
| astmt-master | fblib/evaluation/jaccard.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import cv2
import os.path
import numpy as np
import glob
import json
def eval_albedo(loader, folder):
rmses = []
for i, sample in enumerate(loader):
if i % 500 == 0:
print('Evaluating Albedo: {} of {} objects'.format(i, len(loader)))
# Load result
filename = os.path.join(folder, sample['meta']['image'] + '.png')
pred = cv2.imread(filename).astype(np.float32)[..., ::-1] / 255
label = sample['albedo']
if pred.shape != label.shape:
warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')
pred = cv2.resize(pred, label.shape[::-1], interpolation=cv2.INTER_LINEAR)
rmse_tmp = (label - pred) ** 2
rmse_tmp = np.sqrt(np.mean(rmse_tmp))
rmses.extend([rmse_tmp])
rmses = np.array(rmses)
eval_result = dict()
eval_result['rmse'] = np.mean(rmses)
eval_result = {x: eval_result[x].tolist() for x in eval_result}
return eval_result
def eval_and_store_albedo(database, save_dir, exp_name, overfit=False):
# Dataloaders
if database == 'FSV':
from fblib.dataloaders import fsv as fsv
gt_set = 'test'
db = fsv.FSVGTA(split=gt_set, do_albedo=True, overfit=overfit)
else:
raise NotImplementedError
res_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
base_name = database + '_' + gt_set + '_' + exp_name + '_albedo'
fname = os.path.join(res_dir, base_name + '.json')
# Check if already evaluated
if os.path.isfile(fname):
with open(fname, 'r') as f:
eval_results = json.load(f)
else:
eval_results = eval_albedo(db, os.path.join(res_dir, 'albedo'))
with open(fname, 'w') as f:
json.dump(eval_results, f)
print('Results for Albedo Estimation')
for x in eval_results:
spaces = ''
for j in range(0, 15 - len(x)):
spaces += ' '
print('{0:s}{1:s}{2:.4f}'.format(x, spaces, eval_results[x]))
def main():
from fblib.util.mypath import Path
database = 'FSV'
save_dir = os.path.join(Path.exp_dir(), 'fsv_se/albedo')
# Evaluate all sub-folders
exp_names = glob.glob(save_dir + '/*')
exp_names = [x.split('/')[-1] for x in exp_names]
for exp_name in exp_names:
if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'albedo')):
print('Evaluating: {}'.format(exp_name))
try:
eval_and_store_albedo(database, save_dir, exp_name)
except FileNotFoundError:
print('Results of {} are not ready'.format(exp_name))
if __name__ == '__main__':
main()
| astmt-master | fblib/evaluation/eval_albedo.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import glob
import json
from fblib.util.mypath import Path
def sync_and_evaluate_one_folder(database, save_dir, exp_name, prefix=None, all_tasks_present=False):
# dataset specific parameters
if database == 'BSDS500':
num_req_files = 200
gt_set = ''
elif database == 'PASCALContext':
if all_tasks_present:
num_req_files = 1853
gt_set = 'val_all_tasks_present'
else:
num_req_files = 5105
gt_set = 'val'
elif database == 'NYUD':
num_req_files = 654
gt_set = 'val'
else:
raise NotImplementedError
if prefix is None:
res_exp_name = exp_name
else:
res_exp_name = prefix + '_' + exp_name
# Check whether results of experiments exist
chk_dir = os.path.join(save_dir, exp_name, 'Results_' + database, 'edge')
if not os.path.exists(chk_dir):
print('Experiment {} is not yet ready. Omitting this directory'.format(exp_name))
return
# Check for filenames
fnames = sorted(glob.glob(os.path.join(chk_dir, '*')))
if len(fnames) < num_req_files:
print('Something is wrong with this directory. Check required: {}'.format(exp_name))
return
elif len(fnames) > num_req_files:
print('Already synced: {}'.format(exp_name))
else:
# Seism path
seism_cluster_dir = Path.seism_root_dir()
# rsync to seism
rsync_str = 'rsync -aP {}/ '.format(chk_dir)
rsync_str += '[email protected]:{}/datasets/{}/{} '.format(seism_cluster_dir, database, res_exp_name)
rsync_str += '--exclude=models --exclude=*.txt'
print(rsync_str)
os.system(rsync_str)
# Submit the job
subm_job_str = 'ssh [email protected] "source /home/sgeadmin/BIWICELL/common/settings.sh;' \
'source /home/sgeadmin/BIWICELL/common/settings.sh;'
subm_job_str += 'cp {}/parameters/HED.txt {}/parameters/{}.txt; ' \
''.format(seism_cluster_dir, seism_cluster_dir, res_exp_name)
subm_job_str += 'qsub -N evalFb -t 1-102 {}/eval_in_cluster.py {} read_one_cont_png fb 1 102 {} {}"' \
''.format(seism_cluster_dir, res_exp_name, database, gt_set)
print(subm_job_str)
os.system(subm_job_str)
# Leave the proof of submission
os.system('touch {}/SYNCED_TO_REINHOLD'.format(chk_dir))
def sync_evaluated_results(database, save_dir, exp_name, prefix=None):
if prefix is not None:
res_exp_name = prefix + '_' + exp_name
else:
res_exp_name = exp_name
split = 'val'
# Check whether results of experiment exists
chk_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
if not os.path.exists(chk_dir):
print('Experiment {} is not yet ready. Omitting this directory'.format(exp_name))
return
chk_file = os.path.join(save_dir, exp_name, 'Results_' + database,
database + '_' + split + '_' + exp_name + '_edge.json')
if os.path.isfile(chk_file):
with open(chk_file, 'r') as f:
eval_results = json.load(f)
else:
print('Creating json: {}'.format(res_exp_name))
eval_results = {}
for measure in {'ods_f', 'ois_f', 'ap'}:
tmp_fname = os.path.join(Path.seism_root_dir(), 'results', 'pr_curves', database,
database + '_' + split + '_fb_' + res_exp_name + '_' + measure + '.txt')
if not os.path.isfile(tmp_fname):
print('Result not available')
continue
with open(tmp_fname, 'r') as f:
eval_results[measure] = float(f.read().strip())
# Create edge json file
if eval_results:
print('Saving into .json: {}'.format(chk_file))
with open(chk_file, 'w') as f:
json.dump(eval_results, f)
for measure in eval_results:
print('{}: {}'.format(measure, eval_results[measure]))
def sync_and_evaluate_subfolders(p, database):
print('Starting check in parent directory: {}'.format(p['save_dir_root']))
dirs = os.listdir(p['save_dir_root'])
for exp in dirs:
sync_and_evaluate_one_folder(database=database,
save_dir=p['save_dir_root'],
exp_name=exp,
prefix=p['save_dir_root'].split('/')[-1],
all_tasks_present=(exp.find('mini') >= 0))
def gather_results(p, database):
print('Gathering results: {}'.format(p['save_dir_root']))
dirs = os.listdir(p['save_dir_root'])
for exp in dirs:
sync_evaluated_results(database=database,
save_dir=p['save_dir_root'],
exp_name=exp,
prefix=p['save_dir_root'].split('/')[-1])
def main():
exp_root_dir = os.path.join(Path.exp_dir(), 'pascal_se')
edge_dirs = glob.glob(os.path.join(exp_root_dir, 'edge*'))
p = {}
for edge_dir in edge_dirs:
p['save_dir_root'] = os.path.join(exp_root_dir, edge_dir)
# sync_and_evaluate_subfolders(p, 'NYUD')
gather_results(p, 'PASCALContext')
if __name__ == '__main__':
main()
| astmt-master | fblib/evaluation/eval_edges.py |
| astmt-master | fblib/evaluation/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import cv2
import os.path
import glob
import json
import numpy as np
from PIL import Image
VOC_CATEGORY_NAMES = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
NYU_CATEGORY_NAMES = ['background',
'wall', 'floor', 'cabinet', 'bed', 'chair',
'sofa', 'table', 'door', 'window', 'bookshelf',
'picture', 'counter', 'blinds', 'desk', 'shelves',
'curtain', 'dresser', 'pillow', 'mirror', 'floor mat',
'clothes', 'ceiling', 'books', 'refridgerator', 'television',
'paper', 'towel', 'shower curtain', 'box', 'whiteboard',
'person', 'night stand', 'toilet', 'sink', 'lamp',
'bathtub', 'bag', 'otherstructure', 'otherfurniture', 'otherprop']
FSV_CATEGORY_NAMES = ['background', 'vehicle', 'object']
def eval_semseg(loader, folder, n_classes=20, has_bg=True):
n_classes = n_classes + int(has_bg)
# Iterate
tp = [0] * n_classes
fp = [0] * n_classes
fn = [0] * n_classes
for i, sample in enumerate(loader):
if i % 500 == 0:
print('Evaluating: {} of {} objects'.format(i, len(loader)))
# Load result
filename = os.path.join(folder, sample['meta']['image'] + '.png')
mask = np.array(Image.open(filename)).astype(np.float32)
gt = sample['semseg']
valid = (gt != 255)
if mask.shape != gt.shape:
warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')
mask = cv2.resize(mask, gt.shape[::-1], interpolation=cv2.INTER_NEAREST)
# TP, FP, and FN evaluation
for i_part in range(0, n_classes):
tmp_gt = (gt == i_part)
tmp_pred = (mask == i_part)
tp[i_part] += np.sum(tmp_gt & tmp_pred & valid)
fp[i_part] += np.sum(~tmp_gt & tmp_pred & valid)
fn[i_part] += np.sum(tmp_gt & ~tmp_pred & valid)
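# Per-class Jaccard index: TP / (TP + FP + FN), with a small floor to avoid division by zero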
jac = [0] * n_classes
for i_part in range(0, n_classes):
jac[i_part] = float(tp[i_part]) / max(float(tp[i_part] + fp[i_part] + fn[i_part]), 1e-8)
# Write results
eval_result = dict()
eval_result['jaccards_all_categs'] = jac
eval_result['mIoU'] = np.mean(jac)
return eval_result
def eval_and_store_semseg(database, save_dir, exp_name, overfit=False):
# Dataloaders
if database == 'VOC12':
from fblib.dataloaders import pascal_voc as pascal
n_classes = 20
cat_names = VOC_CATEGORY_NAMES
has_bg = True
gt_set = 'val'
db = pascal.VOC12(split=gt_set, do_semseg=True, overfit=overfit)
elif database == 'PASCALContext':
from fblib.dataloaders import pascal_context as pascal_context
n_classes = 20
cat_names = VOC_CATEGORY_NAMES
has_bg = True
gt_set = 'val'
db = pascal_context.PASCALContext(split=gt_set, do_edge=False, do_human_parts=False, do_semseg=True,
do_normals=False, overfit=overfit)
elif database == 'NYUD':
from fblib.dataloaders import nyud as nyud
n_classes = 40
cat_names = NYU_CATEGORY_NAMES
has_bg = True
gt_set = 'val'
db = nyud.NYUD_MT(split=gt_set, do_semseg=True, overfit=overfit)
elif database == 'FSV':
from fblib.dataloaders import fsv as fsv
n_classes = 2
cat_names = FSV_CATEGORY_NAMES
has_bg = True
gt_set = 'test'
db = fsv.FSVGTA(split=gt_set, mini=True, do_semseg=True, overfit=overfit)
else:
raise NotImplementedError
res_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
base_name = database + '_' + gt_set + '_' + exp_name + '_semseg'
fname = os.path.join(res_dir, base_name + '.json')
# Check if already evaluated
if os.path.isfile(fname):
with open(fname, 'r') as f:
eval_results = json.load(f)
else:
eval_results = eval_semseg(db, os.path.join(res_dir, 'semseg'), n_classes=n_classes, has_bg=has_bg)
with open(fname, 'w') as f:
json.dump(eval_results, f)
# Print Results
class_IoU = eval_results['jaccards_all_categs']
mIoU = eval_results['mIoU']
print('\nSemantic Segmentation mIoU: {0:.4f}\n'.format(100 * mIoU))
for i in range(len(class_IoU)):
spaces = ''
for j in range(0, 15 - len(cat_names[i])):
spaces += ' '
print('{0:s}{1:s}{2:.4f}'.format(cat_names[i], spaces, 100 * class_IoU[i]))
def main():
from fblib.util.mypath import Path
database = 'PASCALContext'
save_dir = os.path.join(Path.exp_dir(), 'pascal_se/edge_semseg_human_parts_normals_sal')
# Evaluate all sub-folders
exp_names = glob.glob(save_dir + '/*')
exp_names = [x.split('/')[-1] for x in exp_names]
for exp_name in exp_names:
if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'semseg')):
print('Evaluating: {}'.format(exp_name))
try:
eval_and_store_semseg(database, save_dir, exp_name)
except FileNotFoundError:
print('Results of {} are not ready'.format(exp_name))
if __name__ == '__main__':
main()
| astmt-master | fblib/evaluation/eval_semseg.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import glob
import json
from fblib.util.mypath import Path
def exists_dir(dir_in):
fold_lst = os.listdir(dir_in)
for x in fold_lst:
if os.path.isdir(os.path.join(dir_in, x)):
return True
return False
def parse_folder(exp_root=Path.exp_dir(),
exp_group='pascal_se',
tasks=None,
db_name='PASCALContext',
query='*',
dic={}):
if tasks is None:
tasks = ['edge', 'semseg', 'human_parts', 'normals', 'sal', 'depth']
exp_group_dir = os.path.join(exp_root, exp_group)
dirs = os.listdir(exp_group_dir)
dirs.sort()
best_perf = {task: 0 for task in tasks}
for task in {'normals', 'depth', 'albedo'}:
if task in tasks:
best_perf[task] = 100
# Examine all subdirectories
for d in dirs:
dir_in = os.path.join(exp_group_dir, d)
# No dir or dir without subdirs
if not os.path.isdir(dir_in) or not exists_dir(dir_in):
continue
# If results folder in dir, print results
if ('Results_' + db_name) in os.listdir(dir_in):
perf = {}
task_counter = 0
# Iterate through all tasks
for i, task in enumerate(tasks):
fnames = glob.glob(dir_in+'/Results_' + db_name + '/' + query + task + '.json')
if not fnames:
perf[task] = -1
continue
task_counter += 1
with open(fnames[0], 'r') as f:
data = json.load(f)
if task == 'edge':
perf[task] = 100 * data['ods_f']
if perf[task] > best_perf[task]:
best_perf[task] = perf[task]
elif task == 'semseg':
perf[task] = 100 * data['mIoU']
if perf[task] > best_perf[task]:
best_perf[task] = perf[task]
elif task == 'human_parts':
perf[task] = 100 * data['mIoU']
if perf[task] > best_perf[task]:
best_perf[task] = perf[task]
elif task == 'normals':
perf[task] = data['mean']
if perf[task] < best_perf[task]:
best_perf[task] = perf[task]
elif task == 'depth':
perf[task] = data['rmse']
if perf[task] < best_perf[task]:
best_perf[task] = perf[task]
elif task == 'albedo':
perf[task] = data['rmse']
if perf[task] < best_perf[task]:
best_perf[task] = perf[task]
elif task == 'sal':
perf[task] = 100 * data['mIoU']
if perf[task] > best_perf[task]:
best_perf[task] = perf[task]
perf_str = [task + ' ' + '%06.3f' % perf[task] + ' ' for i, task in enumerate(tasks)]
perf_str = "".join(perf_str)
if task_counter > 0:
print('{}: {}'.format(perf_str, d))
dic[d] = perf
elif 'models' in os.listdir(dir_in):
# Results are not ready yet
continue
else:
# Examine subdirectories recursively
print('\n\n{}\n'.format(d))
parse_folder(exp_group=os.path.join(exp_group, d), tasks=tasks, query=query, db_name=db_name, dic=dic)
print(best_perf)
if __name__ == '__main__':
print('\nResults:')
dic = {}
db = 'PASCALContext'
if db == 'PASCALContext':
parse_folder(exp_group='pascal_se', query='*res50*', db_name='PASCALContext', dic=dic,
tasks=['edge', 'semseg', 'human_parts', 'normals', 'sal'])
elif db == 'NYUD':
parse_folder(exp_group='nyud_se', query='*res101*', db_name='NYUD', dic=dic,
tasks=['edge', 'semseg', 'normals', 'depth'])
elif db == 'FSV':
parse_folder(exp_group='fsv_se', query='*res101*', db_name='FSV', dic=dic,
tasks=['semseg', 'albedo', 'depth'])
elif db == 'VOC12':
parse_folder(exp_group='single_task', query='*mnet*', db_name='VOC12', dic=dic,
tasks=['semseg'])
| astmt-master | fblib/evaluation/parse_eval_results.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import cv2
import glob
import json
import os.path
import numpy as np
from PIL import Image
PART_CATEGORY_NAMES = ['background',
'head', 'torso', 'uarm', 'larm', 'uleg', 'lleg']
def eval_human_parts(loader, folder, n_parts=6):
tp = [0] * (n_parts + 1)
fp = [0] * (n_parts + 1)
fn = [0] * (n_parts + 1)
counter = 0
for i, sample in enumerate(loader):
if i % 500 == 0:
print('Evaluating: {} of {} objects'.format(i, len(loader)))
if 'human_parts' not in sample:
continue
# Check for valid pixels
gt = sample['human_parts']
uniq = np.unique(gt)
if len(uniq) == 1 and (uniq[0] == 255 or uniq[0] == 0):
continue
# Load result
filename = os.path.join(folder, sample['meta']['image'] + '.png')
mask = np.array(Image.open(filename)).astype(np.float32)
# Case of a binary (probability) result
if n_parts == 1:
mask = (mask > 0.5 * 255).astype(np.float32)
counter += 1
valid = (gt != 255)
if mask.shape != gt.shape:
warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')
mask = cv2.resize(mask, gt.shape[::-1], interpolation=cv2.INTER_NEAREST)
# TP, FP, and FN evaluation
for i_part in range(0, n_parts + 1):
tmp_gt = (gt == i_part)
tmp_pred = (mask == i_part)
tp[i_part] += np.sum(tmp_gt & tmp_pred & (valid))
fp[i_part] += np.sum(~tmp_gt & tmp_pred & (valid))
fn[i_part] += np.sum(tmp_gt & ~tmp_pred & (valid))
print('Successful evaluation for {} images'.format(counter))
jac = [0] * (n_parts + 1)
for i_part in range(0, n_parts + 1):
jac[i_part] = float(tp[i_part]) / max(float(tp[i_part] + fp[i_part] + fn[i_part]), 1e-8)
# Write results
eval_result = dict()
eval_result['jaccards_all_categs'] = jac
eval_result['mIoU'] = np.mean(jac)
return eval_result
def eval_and_store_human_parts(database, save_dir, exp_name, overfit=False):
# Dataloaders
if database == 'PASCALContext':
from fblib.dataloaders import pascal_context as pascal_context
gt_set = 'val'
db = pascal_context.PASCALContext(split=gt_set, do_edge=False, do_human_parts=True, do_semseg=False,
do_normals=False, do_sal=False, overfit=overfit)
else:
raise NotImplementedError
res_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
base_name = database + '_' + gt_set + '_' + exp_name + '_human_parts'
fname = os.path.join(res_dir, base_name + '.json')
# Check if already evaluated
if os.path.isfile(fname):
with open(fname, 'r') as f:
eval_results = json.load(f)
else:
eval_results = eval_human_parts(db, os.path.join(res_dir, 'human_parts'))
with open(fname, 'w') as f:
json.dump(eval_results, f)
# Print Results
class_IoU = eval_results['jaccards_all_categs']
mIoU = eval_results['mIoU']
print('\nHuman Parts mIoU: {0:.4f}\n'.format(100 * mIoU))
for i in range(len(class_IoU)):
spaces = ''
for j in range(0, 15 - len(PART_CATEGORY_NAMES[i])):
spaces += ' '
print('{0:s}{1:s}{2:.4f}'.format(PART_CATEGORY_NAMES[i], spaces, 100 * class_IoU[i]))
def main():
from fblib.util.mypath import Path
database = 'PASCALContext'
save_dir = os.path.join(Path.exp_dir(), 'pascal_se/edge_semseg_human_parts_normals_sal')
# Evaluate all sub-folders
exp_names = glob.glob(save_dir + '/*')
exp_names = [x.split('/')[-1] for x in exp_names]
for exp_name in exp_names:
if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'human_parts')):
print('Evaluating: {}'.format(exp_name))
try:
eval_and_store_human_parts(database, save_dir, exp_name)
except FileNotFoundError:
print('Results of {} are not ready'.format(exp_name))
if __name__ == '__main__':
main()
| astmt-master | fblib/evaluation/eval_human_parts.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import cv2
import os.path
import numpy as np
import glob
import json
def normal_ize(arr):
arr_norm = np.linalg.norm(arr, ord=2, axis=2)[..., np.newaxis] + 1e-12
return arr / arr_norm
def eval_normals(loader, folder):
deg_diff = []
for i, sample in enumerate(loader):
if i % 500 == 0:
print('Evaluating Surface Normals: {} of {} objects'.format(i, len(loader)))
# Check for valid labels
label = sample['normals']
uniq = np.unique(label)
if len(uniq) == 1 and uniq[0] == 0:
continue
# Load result
filename = os.path.join(folder, sample['meta']['image'] + '.png')
pred = 2. * cv2.imread(filename).astype(np.float32)[..., ::-1] / 255. - 1
pred = normal_ize(pred)
if pred.shape != label.shape:
warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')
pred = cv2.resize(pred, label.shape[::-1], interpolation=cv2.INTER_CUBIC)
valid_mask = (np.linalg.norm(label, ord=2, axis=2) != 0)
pred[np.invert(valid_mask), :] = 0.
label[np.invert(valid_mask), :] = 0.
label = normal_ize(label)
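# Angular error in degrees: arccos of the dot product between unit normals,
# clipped to [-1, 1] for numerical stability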
deg_diff_tmp = np.rad2deg(np.arccos(np.clip(np.sum(pred * label, axis=2), a_min=-1, a_max=1)))
deg_diff.extend(deg_diff_tmp[valid_mask])
deg_diff = np.array(deg_diff)
eval_result = dict()
eval_result['mean'] = np.mean(deg_diff)
eval_result['median'] = np.median(deg_diff)
eval_result['rmse'] = np.mean(deg_diff ** 2) ** 0.5
eval_result['11.25'] = np.mean(deg_diff < 11.25) * 100
eval_result['22.5'] = np.mean(deg_diff < 22.5) * 100
eval_result['30'] = np.mean(deg_diff < 30) * 100
eval_result = {x: eval_result[x].tolist() for x in eval_result}
return eval_result
def eval_and_store_normals(database, save_dir, exp_name, overfit=False):
# Dataloaders
if database == 'PASCALContext':
from fblib.dataloaders import pascal_context as pascal_context
gt_set = 'val'
db = pascal_context.PASCALContext(split=gt_set, do_edge=False, do_human_parts=False, do_semseg=False,
do_normals=True, overfit=overfit)
elif database == 'NYUD':
from fblib.dataloaders import nyud as nyud
gt_set = 'val'
db = nyud.NYUD_MT(split=gt_set, do_normals=True, overfit=overfit)
else:
raise NotImplementedError
res_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
base_name = database + '_' + gt_set + '_' + exp_name + '_normals'
fname = os.path.join(res_dir, base_name + '.json')
# Check if already evaluated
if os.path.isfile(fname):
with open(fname, 'r') as f:
eval_results = json.load(f)
else:
eval_results = eval_normals(db, os.path.join(res_dir, 'normals'))
with open(fname, 'w') as f:
json.dump(eval_results, f)
print('Results for Surface Normal Estimation')
for x in eval_results:
spaces = ""
for j in range(0, 15 - len(x)):
spaces += ' '
print('{0:s}{1:s}{2:.4f}'.format(x, spaces, eval_results[x]))
def main():
from fblib.util.mypath import Path
database = 'PASCALContext'
save_dir = os.path.join(Path.exp_dir(), 'pascal_se/edge_semseg_human_parts_normals_sal')
# Evaluate all sub-folders
exp_names = glob.glob(save_dir + '/*')
exp_names = [x.split('/')[-1] for x in exp_names]
for exp_name in exp_names:
if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'normals')):
print('Evaluating: {}'.format(exp_name))
try:
eval_and_store_normals(database, save_dir, exp_name)
except FileNotFoundError:
print('Results of {} are not ready'.format(exp_name))
if __name__ == '__main__':
main()
| astmt-master | fblib/evaluation/eval_normals.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import cv2
import os.path
import numpy as np
import glob
import json
from PIL import Image
import fblib.evaluation.jaccard as evaluation
def eval_sal(loader, folder, mask_thres=None):
if mask_thres is None:
mask_thres = [0.5]
eval_result = dict()
eval_result['all_jaccards'] = np.zeros((len(loader), len(mask_thres)))
eval_result['prec'] = np.zeros((len(loader), len(mask_thres)))
eval_result['rec'] = np.zeros((len(loader), len(mask_thres)))
for i, sample in enumerate(loader):
if i % 500 == 0:
print('Evaluating: {} of {} objects'.format(i, len(loader)))
# Load result
filename = os.path.join(folder, sample["meta"]["image"] + '.png')
mask = np.array(Image.open(filename)).astype(np.float32) / 255.
gt = sample["sal"]
if mask.shape != gt.shape:
warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')
mask = cv2.resize(mask, gt.shape[::-1], interpolation=cv2.INTER_NEAREST)
for j, thres in enumerate(mask_thres):
gt = (gt > thres).astype(np.float32)
mask_eval = (mask > thres).astype(np.float32)
eval_result['all_jaccards'][i, j] = evaluation.jaccard(gt, mask_eval)
eval_result['prec'][i, j], eval_result['rec'][i, j] = evaluation.precision_recall(gt, mask_eval)
# Average for each thresholds
eval_result['mIoUs'] = np.mean(eval_result['all_jaccards'], 0)
eval_result['mPrec'] = np.mean(eval_result['prec'], 0)
eval_result['mRec'] = np.mean(eval_result['rec'], 0)
eval_result['F'] = 2 * eval_result['mPrec'] * eval_result['mRec'] / \
(eval_result['mPrec'] + eval_result['mRec'] + 1e-12)
# Maximum of averages (maxF, maxmIoU)
eval_result['mIoU'] = np.max(eval_result['mIoUs'])
eval_result['maxF'] = np.max(eval_result['F'])
eval_result = {x: eval_result[x].tolist() for x in eval_result}
return eval_result
def eval_and_store_sal(database, save_dir, exp_name, overfit=False):
# Dataloaders
if database == 'PASCALContext':
from fblib.dataloaders import pascal_context as pascal_context
split = 'val'
db = pascal_context.PASCALContext(split=split, do_edge=False, do_human_parts=False, do_semseg=False,
do_normals=False, do_sal=True, overfit=overfit)
elif database == 'PASCAL-S':
from fblib.dataloaders import pascal_sal as pascal_sal
split = 'all'
db = pascal_sal.PASCALS(overfit=overfit, threshold=None)
else:
raise NotImplementedError
res_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
base_name = database + '_' + split + '_' + exp_name + '_sal'
fname = os.path.join(res_dir, base_name + '.json')
# Check if already evaluated
if os.path.isfile(fname):
with open(fname, 'r') as f:
eval_results = json.load(f)
else:
eval_results = eval_sal(db, os.path.join(res_dir, 'sal'), mask_thres=np.linspace(0.2, 0.9, 15))
with open(fname, 'w') as f:
json.dump(eval_results, f)
print('Results for Saliency Estimation')
print('mIoU: {0:.3f}'.format(eval_results['mIoU']))
print('maxF: {0:.3f}'.format(eval_results['maxF']))
def main():
from fblib.util.mypath import Path
database = 'PASCALContext'
save_dir = os.path.join(Path.exp_dir(), 'pascal_se/edge_semseg_human_parts_normals_sal')
# Evaluate all sub-folders
exp_names = glob.glob(save_dir + '/*')
exp_names = [x.split('/')[-1] for x in exp_names]
for exp_name in exp_names:
if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'sal')):
print('Evaluating: {}'.format(exp_name))
try:
eval_and_store_sal(database, save_dir, exp_name, overfit=False)
except FileNotFoundError:
print('Results of {} are not ready'.format(exp_name))
if __name__ == '__main__':
main()
| astmt-master | fblib/evaluation/eval_sal.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import warnings
import cv2
import os.path
import numpy as np
import glob
import json
import scipy.io as sio
def eval_depth(loader, folder):
rmses = []
log_rmses = []
for i, sample in enumerate(loader):
if i % 500 == 0:
print('Evaluating depth: {} of {} objects'.format(i, len(loader)))
# Load result
filename = os.path.join(folder, sample['meta']['image'] + '.mat')
pred = sio.loadmat(filename)['depth'].astype(np.float32)
label = sample['depth']
if pred.shape != label.shape:
warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')
pred = cv2.resize(pred, label.shape[::-1], interpolation=cv2.INTER_LINEAR)
valid_mask = (label != 0)  # record invalid (zero-depth) pixels before clamping
label[label == 0] = 1e-9
pred[pred <= 0] = 1e-9
pred[np.invert(valid_mask)] = 0.
label[np.invert(valid_mask)] = 0.
n_valid = np.sum(valid_mask)
log_rmse_tmp = (np.log(label) - np.log(pred)) ** 2
log_rmse_tmp = np.sqrt(np.sum(log_rmse_tmp) / n_valid)
log_rmses.extend([log_rmse_tmp])
rmse_tmp = (label - pred) ** 2
rmse_tmp = np.sqrt(np.sum(rmse_tmp) / n_valid)
rmses.extend([rmse_tmp])
rmses = np.array(rmses)
log_rmses = np.array(log_rmses)
eval_result = dict()
eval_result['rmse'] = np.mean(rmses)
eval_result['log_rmse'] = np.median(log_rmses)
return eval_result
def eval_and_store_depth(database, save_dir, exp_name, overfit=False):
# Dataloaders
if database == 'NYUD':
from fblib.dataloaders import nyud as nyud
gt_set = 'val'
db = nyud.NYUD_MT(split=gt_set, do_depth=True, overfit=overfit)
elif database == 'FSV':
from fblib.dataloaders import fsv as fsv
gt_set = 'test'
db = fsv.FSVGTA(split='test', do_depth=True, overfit=overfit)
else:
raise NotImplementedError
res_dir = os.path.join(save_dir, exp_name, 'Results_' + database)
base_name = database + '_' + gt_set + '_' + exp_name + '_depth'
fname = os.path.join(res_dir, base_name + '.json')
# Check if already evaluated
if os.path.isfile(fname):
with open(fname, 'r') as f:
eval_results = json.load(f)
else:
eval_results = eval_depth(db, os.path.join(res_dir, 'depth'))
with open(fname, 'w') as f:
json.dump(eval_results, f)
print('Results for Depth Estimation')
for x in eval_results:
spaces = ''
for j in range(0, 15 - len(x)):
spaces += ' '
print('{0:s}{1:s}{2:.4f}'.format(x, spaces, eval_results[x]))
def main():
from fblib.util.mypath import Path
database = 'NYUD'
save_dir = os.path.join(Path.exp_dir(), 'nyud_se/edge_semseg_normals_depth')
# Evaluate all sub-folders
exp_names = glob.glob(save_dir + '/*')
exp_names = [x.split('/')[-1] for x in exp_names]
for exp_name in exp_names:
if os.path.isdir(os.path.join(save_dir, exp_name, 'Results_' + database, 'depth')):
print('Evaluating: {}'.format(exp_name))
try:
eval_and_store_depth(database, save_dir, exp_name)
except FileNotFoundError:
print('Results of {} are not ready'.format(exp_name))
if __name__ == '__main__':
main()
| astmt-master | fblib/evaluation/eval_depth.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import random
import shutil
import pandas as pd
import tqdm
if __name__ == "__main__":
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="/disc/data/dir")
parser.add_argument("--output_dir", type=str, default="/disc/data/dir")
return parser
params = get_parser().parse_args()
copy_mode = 'symlink'
print("Args:{}".format(json.dumps(vars(params))))
dest_ref10k_dir = os.path.join(params.output_dir, "references_10k")
dest_ref990k_dir = os.path.join(params.output_dir, "references_990k")
dest_query_40k_dir = os.path.join(params.output_dir, "queries_40k")
os.makedirs(dest_ref10k_dir, exist_ok=True)
os.makedirs(dest_ref990k_dir, exist_ok=True)
os.makedirs(dest_query_40k_dir, exist_ok=True)
print(f"Creating output directories: {dest_ref10k_dir}, {dest_ref990k_dir}, {dest_query_40k_dir}")
print(f"Copying the reference images")
reference_dir = os.path.join(params.data_dir, "references")
filenames = [f'R{ii:06d}.jpg' for ii in range(1000000)]
csv_path = os.path.join(params.data_dir, "groundtruth_matches.csv")
df = pd.read_csv(csv_path, header=None, names=['Q', 'R'])
rs = df['R'].values.tolist()
rs.sort()
is_img_in_query = {}
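# Editor's note: the single pass below relies on both `filenames` and `rs` being sorted
# in the same lexicographic order, so matching can be done with one linear scan.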
for filename in filenames:
is_img_in_query[filename] = False
if len(rs) == 0:
continue
if rs[0] in filename:
is_img_in_query[filename] = True
rs.pop(0)
print(f"Number of reference images that are used in query: {sum(is_img_in_query.values())}")
for filename in tqdm.tqdm(filenames):
img_path = os.path.join(reference_dir, filename)
dest_dir = dest_ref10k_dir if is_img_in_query[filename] else dest_ref990k_dir
if copy_mode == 'symlink':
os.symlink(img_path, os.path.join(dest_dir, filename))
else:
shutil.copy(img_path, os.path.join(dest_dir, filename))
print(f"Copying the query images")
train_dir = os.path.join(params.data_dir, "train")
filenames = [f'T{ii:06d}.jpg' for ii in range(1000000)]
random.seed(0)
filenames = random.sample(filenames, 40000)
for filename in tqdm.tqdm(filenames):
img_path = os.path.join(train_dir, filename)
if copy_mode == 'symlink':
os.symlink(img_path, os.path.join(dest_query_40k_dir, filename))
else:
shutil.copy(img_path, os.path.join(dest_query_40k_dir, filename))
| active_indexing-main | prepare_disc.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import tqdm
import json
import torch
from torch import device
from torchvision import transforms
from activeindex import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if __name__ == '__main__':
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default='output')
parser.add_argument("--data_dir", type=str, default="/img/data/dir")
parser.add_argument("--model_name", type=str, default="torchscript")
parser.add_argument("--model_path", type=str, default="/path/to/model.torchscript.pt")
parser.add_argument("--resize_size", type=int, default=288, help="Resize images to this size. (Default: 288)")
parser.add_argument("--batch_size", type=int, default=256, help="Batch size.")
return parser
params = get_parser().parse_args()
print("__log__:{}".format(json.dumps(vars(params))))
print('>>> Creating output directory...')
os.makedirs(params.output_dir, exist_ok=True)
print('>>> Building backbone...')
model = utils.build_backbone(path=params.model_path, name=params.model_name)
model.eval()
model.to(device)
print('>>> Creating dataloader...')
NORMALIZE_IMAGENET = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
default_transform = transforms.Compose([
transforms.ToTensor(),
NORMALIZE_IMAGENET,
transforms.Resize((params.resize_size, params.resize_size)),
])
img_loader = utils.get_dataloader(params.data_dir, default_transform, batch_size=params.batch_size, collate_fn=None)
print('>>> Extracting features...')
features = []
with open(os.path.join(params.output_dir, "filenames.txt"), 'w') as f:
with torch.no_grad():
for ii, imgs in enumerate(tqdm.tqdm(img_loader)):
imgs = imgs.to(device)
fts = model(imgs)
features.append(fts.cpu())
for jj in range(fts.shape[0]):
sample_fname = img_loader.dataset.samples[ii*params.batch_size + jj]
f.write(sample_fname + "\n")
print('>>> Saving features...')
features = torch.concat(features, dim=0)
torch.save(features, os.path.join(params.output_dir, 'fts.pth'))
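# Editor's note (illustrative, not part of the original script): the two outputs line up
# row by row and can be reloaded later with, e.g.:
#   fts = torch.load(os.path.join(params.output_dir, 'fts.pth'))
#   names = open(os.path.join(params.output_dir, 'filenames.txt')).read().splitlines()
#   assert fts.shape[0] == len(names)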
| active_indexing-main | extract_fts.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from augly.image import functional as aug_functional
import torch
from torchvision import transforms
from torchvision.transforms import functional
from . import augment_queries
NORMALIZE_IMAGENET = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
UNNORMALIZE_IMAGENET = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225])
image_std = torch.Tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
def center_crop(x, scale):
""" Perform center crop such that the target area of the crop is at a given scale
Args:
x: PIL image
scale: target area scale
"""
scale = np.sqrt(scale)
new_edges_size = [int(s*scale) for s in x.size][::-1]
return functional.center_crop(x, new_edges_size)
def resize(x, scale):
""" Perform center crop such that the target area of the crop is at a given scale
Args:
x: PIL image
scale: target area scale
"""
scale = np.sqrt(scale)
new_edges_size = [int(s*scale) for s in x.size][::-1]
return functional.resize(x, new_edges_size)
def psnr(x, y):
"""
Return PSNR
Args:
x, y: Images tensor with imagenet normalization
"""
delta = 255 * (x - y) * image_std.to(x.device)
psnr = 20*np.log10(255) - 10*torch.log10(torch.mean(delta**2))
return psnr
def linf(x, y):
"""
Return Linf distance
Args:
x, y: Images tensor with imagenet normalization
"""
return torch.max(torch.abs(255 * (x - y) * image_std.to(x.device)))
attacks_dict = {
"none": lambda x : x,
"rotation": lambda x, angle: functional.rotate(x, angle, functional.InterpolationMode('bilinear'), expand=True),
"grayscale": functional.rgb_to_grayscale,
"contrast": functional.adjust_contrast,
"brightness": functional.adjust_brightness,
"hue": functional.adjust_hue,
"hflip": functional.hflip,
"vflip": functional.vflip,
"blur": functional.gaussian_blur, # sigma = ksize*0.15 + 0.35 - ksize = (sigma-0.35)/0.15
"jpeg": aug_functional.encoding_quality,
"resize": resize,
"center_crop": center_crop,
"meme_format": aug_functional.meme_format,
"overlay_emoji": aug_functional.overlay_emoji,
"overlay_onto_screenshot": aug_functional.overlay_onto_screenshot,
"auto": augment_queries.augment_img,
}
attacks = [{'attack': 'none'}] \
+ [{'attack': 'auto'}] \
+ [{'attack': 'meme_format'}] \
+ [{'attack': 'overlay_onto_screenshot'}] \
+ [{'attack': 'rotation', 'angle': angle} for angle in [25,90]] \
+ [{'attack': 'center_crop', 'scale': 0.5}] \
+ [{'attack': 'resize', 'scale': 0.5}] \
+ [{'attack': 'blur', 'kernel_size': 11}] \
+ [{'attack': 'jpeg', 'quality': 50}] \
+ [{'attack': 'hue', 'hue_factor': 0.2}] \
+ [{'attack': 'contrast', 'contrast_factor': cf} for cf in [0.5, 2.0]] \
+ [{'attack': 'brightness', 'brightness_factor': bf} for bf in [0.5, 2.0]] \
# more attacks for the full evaluation
attacks_2 = [{'attack': 'rotation', 'angle': jj} for jj in range(-90, 100,10)] \
+ [{'attack': 'center_crop', 'scale': 0.1*jj} for jj in range(1,11)] \
+ [{'attack': 'resize', 'scale': 0.1*jj} for jj in range(1,11)] \
+ [{'attack': 'blur', 'kernel_size': 1+2*jj} for jj in range(1,15)] \
+ [{'attack': 'jpeg', 'quality': 10*jj} for jj in range(1,11)] \
+ [{'attack': 'contrast', 'contrast_factor': 0.5 + 0.1*jj} for jj in range(15)] \
+ [{'attack': 'brightness', 'brightness_factor': 0.5 + 0.1*jj} for jj in range(15)] \
+ [{'attack': 'hue', 'hue_factor': -0.5 + 0.1*jj} for jj in range(0,11)]
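# Editor's note: illustrative usage sketch for generate_attacks() defined below
# (assumes a local 'img.jpg'; not part of the original module):
#   from PIL import Image
#   img = Image.open('img.jpg').convert('RGB')
#   attacked = generate_attacks(img)                  # default `attacks` list
#   attacked_full = generate_attacks(img, attacks_2)  # larger evaluation grid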
def generate_attacks(img, attacks=attacks):
""" Generate a list of attacked images from a PIL image. """
attacked_imgs = []
for attack in attacks:
attack = attack.copy()
attack_name = attack.pop('attack')
attacked_imgs.append(attacks_dict[attack_name](img, **attack))
return attacked_imgs | active_indexing-main | activeindex/utils_img.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class JND(nn.Module):
""" https://ieeexplore.ieee.org/document/7885108 """
def __init__(self, preprocess = lambda x: x):
super(JND, self).__init__()
kernel_x = [[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]
kernel_y = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]
kernel_lum = [[1, 1, 1, 1, 1], [1, 2, 2, 2, 1], [1, 2, 0, 2, 1], [1, 2, 2, 2, 1], [1, 1, 1, 1, 1]]
kernel_x = torch.FloatTensor(kernel_x).unsqueeze(0).unsqueeze(0)
kernel_y = torch.FloatTensor(kernel_y).unsqueeze(0).unsqueeze(0)
kernel_lum = torch.FloatTensor(kernel_lum).unsqueeze(0).unsqueeze(0)
self.weight_x = nn.Parameter(data=kernel_x, requires_grad=False)
self.weight_y = nn.Parameter(data=kernel_y, requires_grad=False)
self.weight_lum = nn.Parameter(data=kernel_lum, requires_grad=False)
self.preprocess = preprocess
def jnd_la(self, x, alpha=1.0, eps=1e-3):
""" Luminance masking: x must be in [0,255] """
la = F.conv2d(x, self.weight_lum, padding=2) / 32
mask_lum = la <= 127
la[mask_lum] = 17 * (1 - torch.sqrt(la[mask_lum]/127 + eps)) + 3
la[~mask_lum] = 3/128 * (la[~mask_lum] - 127) + 3
return alpha * la
def jnd_cm(self, x, beta=0.117):
""" Contrast masking: x must be in [0,255] """
grad_x = F.conv2d(x, self.weight_x, padding=1)
grad_y = F.conv2d(x, self.weight_y, padding=1)
cm = torch.sqrt(grad_x**2 + grad_y**2)
cm = 16 * cm**2.4 / (cm**2 + 26**2)
return beta * cm
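# The heatmap below combines the two masks: luminance adaptation + contrast masking,
# minus an overlap term weighted by `clc`, clamped to a minimum visibility of 5/255.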
def heatmaps(self, x, clc=0.3):
""" x must be in [0,1] """
x = 255 * self.preprocess(x)
x = 0.299 * x[...,0:1,:,:] + 0.587 * x[...,1:2,:,:] + 0.114 * x[...,2:3,:,:]
la = self.jnd_la(x)
cm = self.jnd_cm(x)
return torch.clamp_min(la + cm - clc * torch.minimum(la, cm), 5)/255 # b 1 h w | active_indexing-main | activeindex/attenuations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import random
from typing import Any, Dict, List, NamedTuple, Tuple
import augly.image as imaugs
import augly.utils as utils
import numpy as np
from PIL import Image
RNG = np.random.RandomState
rng = np.random.RandomState(0)
ParametersDistributions = NamedTuple
class ParameterDistribution:
"""Define how to sample a parameter"""
def __init__(self, low: Any, high: Any):
self.low = low
self.high = high
def sample(self, rng: RNG) -> Any:
raise NotImplementedError()
class FixedVariable(ParameterDistribution):
def __init__(self, value: Any):
super().__init__(0, 0)
self.value = value
def sample(self, rng: RNG) -> Any:
return self.value
class UniformFloat(ParameterDistribution):
def sample(self, rng: RNG) -> float:
return float(rng.uniform(self.low, self.high))
class UniformInt(ParameterDistribution):
def sample(self, rng: RNG) -> int:
return int(rng.randint(self.low, self.high + 1))
class UniformColor(ParameterDistribution):
def sample(self, rng: RNG) -> Tuple[int, int, int]:
return tuple(int(rng.randint(self.low, self.high)) for _ in range(3))
class UniformChoice(ParameterDistribution):
def __init__(self, choices: List[Any]):
super().__init__(0, 0)
self.choices = choices
def sample(self, rng: RNG) -> Any:
if not self.choices:
return None
index = rng.randint(0, len(self.choices))
return self.choices[index]
class UniformBool(ParameterDistribution):
def __init__(self):
super().__init__(0, 0)
def sample(self, rng: RNG) -> bool:
return bool(UniformInt(0, 1).sample(rng))
class TextChoice(ParameterDistribution):
def sample(self, rng: RNG) -> List[int]:
length = UniformInt(self.low, self.high).sample(rng)
return [UniformInt(0, 10000).sample(rng) for _ in range(length)]
class ListPD(ParameterDistribution):
def __init__(self, pds: List[ParameterDistribution]):
super().__init__(0, 0)
self.pds = pds
def sample(self, rng: RNG) -> List[Any]:
return [pd.sample(rng) for pd in self.pds]
class TuplePD(ParameterDistribution):
def __init__(self, pds: List[ParameterDistribution]):
super().__init__(0, 0)
self.pds = pds
def sample(self, rng: RNG) -> Tuple:
return tuple(pd.sample(rng) for pd in self.pds)
class ExponentialInt(ParameterDistribution):
def __init__(self, scale: float, low: int, high: int):
super().__init__(low, high)
self.scale = scale
def sample(self, rng) -> int:
        # if we sample a value larger than `high`, we resample
        # (taking min(x, high) instead would change the distribution)
while True:
r = rng.exponential(scale=self.scale)
if int(r + self.low) <= self.high:
return int(r + self.low)
class SymmetricFactor(ParameterDistribution):
def sample(self, rng: RNG) -> float:
factor = float(rng.uniform(self.low, self.high))
invert = rng.randint(0, 2)
return 1 / factor if invert else factor
class UniformLeftRightFactor(ParameterDistribution):
def sample(self, rng: np.random.RandomState) -> Tuple[float, float]:
width = float(rng.uniform(self.low, self.high))
left = rng.uniform(0, 1 - width)
right = left + width
return left, right
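# Illustrative usage of the distributions above (a minimal sketch, not used by the
# pipeline itself): every sample() call goes through a shared numpy RandomState so
# that a fixed seed reproduces the exact same augmentation parameters.
#
#   _rng = np.random.RandomState(0)
#   UniformFloat(0.1, 0.3).sample(_rng)                    # a float in [0.1, 0.3)
#   UniformChoice(["a", "b", "c"]).sample(_rng)            # one of the three items
#   ExponentialInt(scale=2.0, low=1, high=5).sample(_rng)  # an int in [1, 5]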
class MediaFilterParameters(NamedTuple):
"""Contains the parameters to apply a video filter.
This defines a unique and reproducible transformation"""
name: str
kwargs: Dict[str, Any]
def __repr__(self) -> str:
return json.dumps({**{"name": self.name}, **self.kwargs})
class MediaFilterWithPD(NamedTuple):
"""Define a filter and how to sample all its parameters"""
    # filter name; must match the name of an augly.image function (looked up with getattr)
name: str
# must contains only ParameterDistribution attributes
pd: ParametersDistributions
class AspectRatioPD(ParametersDistributions):
ratio: UniformFloat = UniformFloat(0.5, 2.0)
class BlurPD(ParametersDistributions):
radius: UniformFloat = UniformFloat(5.0, 10.0)
class BlurryMaskPD(ParametersDistributions):
background_image: UniformChoice
overlay_size: UniformFloat = UniformFloat(0.3, 0.8)
x_pos: UniformFloat = UniformFloat(0, 1.0)
y_pos: UniformFloat = UniformFloat(0, 1.0)
class BrightnessPD(ParametersDistributions):
factor: UniformFloat = UniformFloat(0.1, 1.9)
class ClipImageSizePD(ParametersDistributions):
min_resolution: UniformChoice = UniformChoice([500])
max_resolution: UniformChoice = UniformChoice([3000000])
class ConvertColorPD(ParametersDistributions):
mode: UniformChoice = UniformChoice(["P"])
colors: UniformInt = UniformInt(2, 16)
class CropPD(ParametersDistributions):
xs: UniformLeftRightFactor = UniformLeftRightFactor(0.3, 0.6)
ys: UniformLeftRightFactor = UniformLeftRightFactor(0.3, 0.6)
class EncodingQualityPD(ParametersDistributions):
quality: UniformInt = UniformInt(5, 25)
class EnhanceEdgesPD(ParametersDistributions):
pass
class GrayscalePD(ParametersDistributions):
pass
class HFlipPD(ParametersDistributions):
pass
class IdentityPD(ParametersDistributions):
pass
class OverlayEmojiPD(ParametersDistributions):
emoji_path: UniformChoice
x_pos: UniformFloat = UniformFloat(0.0, 0.8)
y_pos: UniformFloat = UniformFloat(0.0, 0.8)
opacity: UniformFloat = UniformFloat(0.5, 1.0)
emoji_size: UniformFloat = UniformFloat(0.4, 0.8)
class OverlayOntoImagePD(ParametersDistributions):
background_image: UniformChoice
overlay_size: UniformFloat = UniformFloat(0.3, 0.6)
x_pos: UniformFloat = UniformFloat(0, 0.4)
y_pos: UniformFloat = UniformFloat(0, 0.4)
class OverlayOntoScreenshotPD(ParametersDistributions):
template_filepath: UniformChoice
crop_src_to_fit: UniformChoice = UniformChoice([True])
class OverlayTextPD(ParametersDistributions):
font_file: UniformChoice
text: TextChoice = TextChoice(5, 15)
font_size: UniformFloat = UniformFloat(0.1, 0.3)
color: UniformColor = UniformColor(0, 255)
x_pos: UniformFloat = UniformFloat(0.0, 0.6)
y_pos: UniformFloat = UniformFloat(0.0, 0.6)
class PadSquarePD(ParametersDistributions):
color: UniformColor = UniformColor(0, 255)
class PerspectiveTransformPD(ParametersDistributions):
sigma: UniformFloat = UniformFloat(30.0, 60.0)
crop_out_black_border: UniformChoice = UniformChoice([True])
class PixelizationPD(ParametersDistributions):
ratio: UniformFloat = UniformFloat(0.2, 0.5)
class RotatePD(ParametersDistributions):
degrees: UniformFloat = UniformFloat(-90.0, 90.0)
class SaturationPD(ParametersDistributions):
factor: UniformFloat = UniformFloat(2.0, 5.0)
class ShufflePixelsPD(ParametersDistributions):
factor: UniformFloat = UniformFloat(0.1, 0.3)
def sample(rng: RNG, filter_with_pd: MediaFilterWithPD) -> MediaFilterParameters:
"""Sample for each ParameterDistribution attribute and
return a dict with sampled parameters
"""
kwargs = {key: pdi.sample(rng) for key, pdi in filter_with_pd.pd._asdict().items()}
return MediaFilterParameters(name=filter_with_pd.name, kwargs=kwargs)
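# Example (sketch): sampling concrete, reproducible parameters for a single filter.
#
#   blur = MediaFilterWithPD(name="blur", pd=BlurPD())
#   sample(np.random.RandomState(0), blur)
#   # -> MediaFilterParameters(name="blur", kwargs={"radius": <float in [5.0, 10.0)>})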
def sample_img_filters_parameters(rng: RNG, available_filters: List[MediaFilterWithPD]) -> List[MediaFilterParameters]:
"""Sample parameters for each available filters"""
return [sample(rng, vf) for vf in available_filters]
def get_assets(emoji_dir: str, font_dir: str, screenshot_dir: str) -> Tuple[List[str], List[str], List[str]]:
emojis = []
for fn in utils.pathmgr.ls(emoji_dir):
fp = os.path.join(emoji_dir, fn)
if utils.pathmgr.isdir(fp):
emojis.extend([os.path.join(fp, f) for f in utils.pathmgr.ls(fp)])
fonts = [
os.path.join(font_dir, fn)
for fn in utils.pathmgr.ls(font_dir)
if fn.endswith(".ttf")
]
template_filenames = [
os.path.join(screenshot_dir, fn)
for fn in utils.pathmgr.ls(screenshot_dir)
if fn.split(".")[-1] != "json"
]
return emojis, fonts, template_filenames
emojis, fonts, template_filenames = get_assets(
utils.EMOJI_DIR, utils.FONTS_DIR, utils.SCREENSHOT_TEMPLATES_DIR
)
primitives = {
"color": [
MediaFilterWithPD(name="brightness", pd=BrightnessPD()),
MediaFilterWithPD(name="grayscale", pd=GrayscalePD()),
MediaFilterWithPD(name="saturation", pd=SaturationPD()),
],
"overlay": [
MediaFilterWithPD(
name="overlay_emoji",
pd=OverlayEmojiPD(emoji_path=UniformChoice(emojis)),
),
MediaFilterWithPD(
name="overlay_text", pd=OverlayTextPD(font_file=UniformChoice(fonts))
),
],
"pixel-level": [
MediaFilterWithPD(name="blur", pd=BlurPD()),
MediaFilterWithPD(name="convert_color", pd=ConvertColorPD()),
MediaFilterWithPD(name="encoding_quality", pd=EncodingQualityPD()),
MediaFilterWithPD(name="apply_pil_filter", pd=EnhanceEdgesPD()),
MediaFilterWithPD(name="pixelization", pd=PixelizationPD()),
MediaFilterWithPD(name="shuffle_pixels", pd=ShufflePixelsPD()),
],
"spatial": [
MediaFilterWithPD(name="crop", pd=CropPD()),
MediaFilterWithPD(name="hflip", pd=HFlipPD()),
MediaFilterWithPD(name="change_aspect_ratio", pd=AspectRatioPD()),
MediaFilterWithPD(
name="overlay_onto_screenshot",
pd=OverlayOntoScreenshotPD(
template_filepath=UniformChoice(template_filenames)
),
),
MediaFilterWithPD(name="pad_square", pd=PadSquarePD()),
MediaFilterWithPD(
name="perspective_transform", pd=PerspectiveTransformPD()
),
MediaFilterWithPD(name="rotate", pd=RotatePD()),
],
}
post_filters = []
def augment_img_wrapper(img, rng: RNG = rng, return_params=False):
""" Wrapper for augment_img to handle errors """
try:
return augment_img(img, rng, return_params)
except Exception as e:
print(f"Error augmenting image: {e}")
        return (img, ["none"]) if return_params else img
def augment_img(img, rng: RNG = rng, return_params=False):
"""
Sample augmentation parameters for img.
Args:
img: query image.
"""
# select filters to apply
num_filters = rng.choice(np.arange(1, 5), p=[0.1, 0.2, 0.3, 0.4])
filter_types_to_apply = rng.choice(
np.asarray(list(primitives.keys())), size=num_filters, replace=False
)
filters_to_apply = [
primitives[ftype][rng.randint(0, len(primitives[ftype]))]
for ftype in filter_types_to_apply
]
filters_to_apply += post_filters
# Ensure that crop is in first position if selected and that convert_color is in last position if selected
for j, vf in enumerate(filters_to_apply):
if vf.name == "crop":
filters_to_apply[j], filters_to_apply[0] = (
filters_to_apply[0],
filters_to_apply[j],
)
if vf.name == "convert_color":
filters_to_apply[j], filters_to_apply[-1] = (
filters_to_apply[-1],
filters_to_apply[j],
)
# sample parameters for each filter
all_filters_parameters = sample_img_filters_parameters(
rng, filters_to_apply
)
# apply filters
for j, ftr in enumerate(all_filters_parameters):
aug_func = getattr(imaugs, ftr.name, None)
kwargs = ftr.kwargs
if ftr.name == "crop":
x1, x2 = kwargs.pop("xs")
y1, y2 = kwargs.pop("ys")
kwargs["x1"], kwargs["x2"] = x1, x2
kwargs["y1"], kwargs["y2"] = y1, y2
img = aug_func(image=img, **kwargs)
img = img.convert('RGB')
if return_params:
return img, all_filters_parameters
else:
return img
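# Minimal usage sketch (the image path below is a placeholder, not part of the repo):
#
#   pil_img = Image.open("example.jpg").convert("RGB")
#   augmented, applied = augment_img(pil_img, rng, return_params=True)
#   # `applied` is the list of MediaFilterParameters that were applied, in order.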
if __name__ == '__main__':
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default='output')
parser.add_argument("--data_dir", type=str, default="/img/data/dir/")
parser.add_argument("--seed", type=int, default=42)
return parser
params = get_parser().parse_args()
print("__log__:{}".format(json.dumps(vars(params))))
# set seed
np.random.seed(params.seed)
random.seed(params.seed)
rng = np.random.RandomState(params.seed)
# Load data
print("Loading filenames from {}".format(params.data_dir))
filenames = os.listdir(params.data_dir)
# Generate augmented images
print("Generating augmented images into {}".format(params.output_dir))
augmentations = []
os.makedirs(params.output_dir, exist_ok=True)
for filename in filenames:
img_path = os.path.join(params.data_dir, filename)
img = Image.open(img_path)
img, filters = augment_img(img, rng, return_params=True)
img.convert('RGB').save(os.path.join(params.output_dir, filename), quality=95)
augmentations.append(filters)
print(filename, "[" + ", ".join([str(ftr) for ftr in filters]) + "]")
# break
# Save augmentations
print("Saving augmentations")
with open(os.path.join(params.output_dir, "augmentations.txt"), "a") as f:
for augmentation in augmentations:
line = "[" + ", ".join([str(ftr) for ftr in augmentation]) + "]\n"
f.write(line)
| active_indexing-main | activeindex/augment_queries.py |
active_indexing-main | activeindex/__init__.py |
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import time
from typing import Callable
import faiss
import numpy as np
import torch
from torch import nn
from torchvision.transforms import functional
from . import utils, utils_img
from .attenuations import JND
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def get_targets(
target: str,
index: faiss.Index,
fts: torch.Tensor,
ivf_centroids: np.ndarray = None
) -> torch.Tensor:
"""
Get the target representations for the features.
Args:
target (str): Target representation to use.
index (faiss.Index): Index to use for retrieval.
fts (torch.Tensor): Features to get the targets for. batch_size x feature_dim
ivf_centroids (np.ndarray): Centroids of the IVF index.
Returns:
targets (torch.Tensor): Target representations for the features. batch_size x feature_dim
"""
if target == 'pq_recons':
targets = index.reconstruct_n(index.ntotal-fts.shape[0], fts.shape[0]) # reconstruct the PQ codes that have just been added
targets = torch.tensor(targets)
elif target == 'ori_ft':
        targets = fts.clone()
elif target == 'ivf_cluster':
ivf_D, ivf_I = index.quantizer.search(fts.detach().cpu().numpy(), k=1) # find the closest cluster center for each feature
targets = ivf_centroids.take(ivf_I.flatten(), axis=0) # get the cluster representation for each feature
targets = torch.tensor(targets)
elif target == 'ivf_cluster_half':
ivf_D, ivf_I = index.quantizer.search(fts.detach().cpu().numpy(), k=1)
centroids = ivf_centroids.take(ivf_I.flatten(), axis=0)
        targets = (torch.tensor(centroids) + fts.clone()) / 2  # midpoint between centroid and original feature
else:
raise NotImplementedError(f'Invalid target: {target}')
return targets
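# Note: with the default 'pq_recons' target and an IVF-PQ index, the targets are the
# PQ reconstructions of the fts.shape[0] vectors that were just added to the index,
# i.e. the quantized representation the index actually stores for them.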
def activate_images(
imgs: list[torch.Tensor],
ori_fts: torch.Tensor,
model: nn.Module,
index: faiss.Index,
ivf_centroids: np.ndarray,
attenuation: JND,
loss_f: Callable,
loss_i: Callable,
params: argparse.Namespace
) -> list[torch.Tensor]:
"""
Activate images.
Args:
imgs (list of torch.Tensor): Images to activate. batch_size * [3 x height x width]
model (torch.nn.Module): Model for feature extraction.
index (faiss.Index): Index to use for retrieval.
ivf_centroids (np.ndarray): Centroids of the IVF index.
attenuation (JND): To create Just Noticeable Difference heatmaps.
loss_f (Callable): Loss function to use for the indexation loss.
loss_i (Callable): Loss function to use for the image loss.
params (argparse.Namespace): Parameters.
Returns:
activated images (list of torch.Tensor): Activated images. batch_size * [3 x height x width]
"""
targets = get_targets(params.target, index, ori_fts, ivf_centroids)
targets = targets.to(device)
# Just noticeable difference heatmaps
alpha = torch.tensor([0.072*(1/0.299), 0.072*(1/0.587), 0.072*(1/0.114)])
alpha = alpha[:,None,None].to(device) # 3 x 1 x 1
heatmaps = [params.scaling * attenuation.heatmaps(img) for img in imgs]
# init distortion + optimizer + scheduler
deltas = [1e-6 * torch.randn_like(img).to(device) for img in imgs] # b (1 c h w)
for distortion in deltas:
distortion.requires_grad = True
optim_params = utils.parse_params(params.optimizer)
optimizer = utils.build_optimizer(model_params=deltas, **optim_params)
if params.scheduler is not None:
scheduler = utils.build_scheduler(optimizer=optimizer, **utils.parse_params(params.scheduler))
# begin optim
iter_time = time.time()
log_stats = []
for gd_it in range(params.iterations):
gd_it_time = time.time()
if params.scheduler is not None:
scheduler.step(gd_it)
# perceptual constraints
percep_deltas = [torch.tanh(delta) for delta in deltas] if params.use_tanh else deltas
percep_deltas = [delta * alpha for delta in percep_deltas] if params.scale_channels else percep_deltas
imgs_t = [img + hm * delta for img, hm, delta in zip(imgs, heatmaps, percep_deltas)]
# get features
batch_imgs = [functional.resize(img_t, (params.resize_size, params.resize_size)) for img_t in imgs_t]
batch_imgs = torch.stack(batch_imgs)
fts = model(batch_imgs) # b d
# compute losses
lf = loss_f(fts, targets)
li = loss_i(imgs_t, imgs)
loss = params.lambda_f * lf + params.lambda_i * li
# step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# log stats
psnrs = torch.tensor([utils_img.psnr(img_t, img) for img_t, img in zip(imgs_t, imgs)])
linfs = torch.tensor([utils_img.linf(img_t, img) for img_t, img in zip(imgs_t, imgs)])
log_stats.append({
'gd_it': gd_it,
'loss': loss.item(),
'loss_f': lf.item(),
'loss_i': li.item(),
'psnr': torch.nanmean(psnrs).item(),
'linf': torch.nanmean(linfs).item(),
'lr': optimizer.param_groups[0]['lr'],
'gd_it_time': time.time() - gd_it_time,
'iter_time': time.time() - iter_time,
'max_mem': torch.cuda.max_memory_allocated() / (1024*1024),
'kw': 'optim',
})
if (gd_it+1) % params.log_freq == 0:
print(json.dumps(log_stats[-1]))
# tqdm.tqdm.write(json.dumps(log_stats[-1]))
# perceptual constraints
percep_deltas = [torch.tanh(delta) for delta in deltas] if params.use_tanh else deltas
percep_deltas = [delta * alpha for delta in percep_deltas] if params.scale_channels else percep_deltas
imgs_t = [img + hm * delta for img, hm, delta in zip(imgs, heatmaps, percep_deltas)]
return imgs_t
| active_indexing-main | activeindex/engine.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import os
import faiss
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import timm
from timm import optim as timm_optim
from timm import scheduler as timm_scheduler
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, models
from torchvision.datasets.folder import default_loader, is_image_file
# Index
def build_index_factory(idx_str, quant, fts_path, idx_path=None) -> faiss.Index:
"""
Builds index from string and fts_path. see https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
idx_str: string describing the index
quant: quantization type, either "L2" or "IP" (Inner Product)
fts_path: path to the train features as a torch tensor .pt file
idx_path: path to save the index
"""
fts = torch.load(fts_path)
fts = fts.numpy() # b d
D = fts.shape[-1]
metric = faiss.METRIC_L2 if quant == 'L2' else faiss.METRIC_INNER_PRODUCT
index = faiss.index_factory(D, idx_str, metric)
index.train(fts)
if idx_path is not None:
print(f'Saving Index to {idx_path}...')
faiss.write_index(index, idx_path)
return index
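# Illustrative call (a sketch; paths are placeholders):
#
#   index = build_index_factory(
#       idx_str="IVF4096,PQ8x8",
#       quant="L2",
#       fts_path="path/to/train/fts.pth",  # torch tensor of shape (n, d)
#       idx_path="indexes/idx=IVF4096,PQ8x8_quant=L2.index",
#   )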
# Arguments helpers
def bool_inst(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ValueError('Boolean value expected in args')
def parse_params(s):
"""
Parse parameters into a dictionary, used for optimizer and scheduler parsing.
Example:
"SGD,lr=0.01" -> {"name": "SGD", "lr": 0.01}
"""
s = s.replace(' ', '').split(',')
params = {}
params['name'] = s[0]
for x in s[1:]:
x = x.split('=')
params[x[0]]=float(x[1])
return params
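# Examples (following the docstring above):
#
#   parse_params("Adam,lr=1e-0")
#   # -> {"name": "Adam", "lr": 1.0}
#   parse_params("CosineLRScheduler,t_initial=50,warmup_t=5")
#   # -> {"name": "CosineLRScheduler", "t_initial": 50.0, "warmup_t": 5.0}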
# Optimizer and Scheduler
def build_optimizer(name, model_params, **optim_params):
""" Build optimizer from a dictionary of parameters """
tim_optimizers = sorted(name for name in timm_optim.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(timm_optim.__dict__[name]))
torch_optimizers = sorted(name for name in torch.optim.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(torch.optim.__dict__[name]))
if name in tim_optimizers:
return getattr(timm_optim, name)(model_params, **optim_params)
elif name in torch_optimizers:
return getattr(torch.optim, name)(model_params, **optim_params)
raise ValueError(f'Unknown optimizer "{name}", choose among {str(tim_optimizers+torch_optimizers)}')
def build_scheduler(name, optimizer, **lr_scheduler_params):
"""
Build scheduler from a dictionary of parameters
Args:
name: name of the scheduler
optimizer: optimizer to be used with the scheduler
params: dictionary of scheduler parameters
Ex:
CosineLRScheduler, optimizer {t_initial=50, cycle_mul=2, cycle_limit=3, cycle_decay=0.5, warmup_lr_init=1e-6, warmup_t=5}
"""
tim_schedulers = sorted(name for name in timm_scheduler.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(timm_scheduler.__dict__[name]))
torch_schedulers = sorted(name for name in torch.optim.lr_scheduler.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(torch.optim.lr_scheduler.__dict__[name]))
if name in tim_schedulers:
return getattr(timm_scheduler, name)(optimizer, **lr_scheduler_params)
elif hasattr(torch.optim.lr_scheduler, name):
return getattr(torch.optim.lr_scheduler, name)(optimizer, **lr_scheduler_params)
raise ValueError(f'Unknown scheduler "{name}", choose among {str(tim_schedulers+torch_schedulers)}')
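# Typical usage (a sketch mirroring how the optimization loop calls these helpers;
# `deltas` stands for the list of trainable tensors being optimized):
#
#   optim_params = parse_params("Adam,lr=1e-0")
#   optimizer = build_optimizer(model_params=deltas, **optim_params)
#   sched_params = parse_params("CosineLRScheduler,t_initial=10")
#   scheduler = build_scheduler(optimizer=optimizer, **sched_params)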
# Model
def build_backbone(path, name):
""" Build a pretrained torchvision backbone from its name.
Args:
path: path to the checkpoint, can be an URL
name: "torchscript" or name of the architecture from torchvision (see https://pytorch.org/vision/stable/models.html)
or timm (see https://rwightman.github.io/pytorch-image-models/models/).
Returns:
model: nn.Module
"""
if name == 'torchscript':
model = torch.jit.load(path)
return model
else:
if hasattr(models, name):
model = getattr(models, name)(pretrained=True)
elif name in timm.list_models():
model = timm.models.create_model(name, num_classes=0)
else:
            raise NotImplementedError('Model %s does not exist in torchvision or timm' % name)
model.head = nn.Identity()
model.fc = nn.Identity()
if path is not None:
if path.startswith("http"):
checkpoint = torch.hub.load_state_dict_from_url(path, progress=False)
else:
checkpoint = torch.load(path)
state_dict = checkpoint
for ckpt_key in ['state_dict', 'model_state_dict', 'teacher']:
if ckpt_key in checkpoint:
state_dict = checkpoint[ckpt_key]
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print(msg)
return model
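# Examples (the torchscript path is a placeholder):
#
#   model = build_backbone(path="/path/to/model.torchscript.pt", name="torchscript")
#   model = build_backbone(path=None, name="resnet50")  # torchvision backbone, head/fc removed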
# Data loading
@functools.lru_cache()
def get_image_paths(path):
logging.info(f"Resolving files in: {path}")
paths = []
for path, _, files in os.walk(path):
for filename in files:
paths.append(os.path.join(path, filename))
return sorted([fn for fn in paths if is_image_file(fn)])
class ImageFolder:
"""An image folder dataset without classes"""
def __init__(self, path, transform=None, loader=default_loader):
self.samples = get_image_paths(path)
self.loader = loader
self.transform = transform
def __getitem__(self, idx: int):
assert 0 <= idx < len(self)
img = self.loader(self.samples[idx])
if self.transform:
return self.transform(img)
return img
def __len__(self):
return len(self.samples)
def collate_fn(batch):
""" Collate function for data loader. Allows to have img of different size"""
return batch
def get_dataloader(data_dir, transform, batch_size=128, num_workers=8, collate_fn=collate_fn):
""" Get dataloader for the images in the data_dir. """
dataset = ImageFolder(data_dir, transform=transform)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn, shuffle=False, pin_memory=True, drop_last=False)
return dataloader
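# Example (the directory is a placeholder; the custom collate_fn returns a plain list,
# so images in a batch may have different sizes):
#
#   from torchvision import transforms
#   tf = transforms.Compose([transforms.ToTensor()])
#   loader = get_dataloader("/path/to/images", tf, batch_size=16)
#   for batch in loader:
#       ...  # batch is a list of image tensors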
| active_indexing-main | activeindex/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import random
import faiss
import tqdm
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.transforms import functional
from torchvision.utils import save_image
from . import attenuations, augment_queries, utils, utils_img
from .engine import activate_images
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def get_parser():
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('Experiments parameters')
    aa("--output_dir", type=str, default="output/", help="Output directory for logs and images (Default: output/)")
aa("--verbose", type=int, default=1)
aa("--seed", type=int, default=0)
group = parser.add_argument_group('Data parameters')
aa("--fts_training_path", type=str, default="path/to/train/fts.pth")
aa("--fts_reference_path", type=str, default="path/to/train/ref_990k.pth")
aa("--data_dir", type=str, default="/path/to/disc/ref_10k.pth")
aa("--query_nonmatch_dir", type=str, default="/path/to/disc/queries_40k")
aa("--batch_size", type=int, default=16)
aa("--batch_size_eval", type=int, default=128)
aa("--resize_size", type=int, default=288, help="Resize images to this size. (Default: 288)")
group = parser.add_argument_group('Model parameters')
aa("--model_name", type=str, default="torchscript")
aa("--model_path", type=str, default="/path/to/model.torchscript.pt")
group = parser.add_argument_group('Index parameters')
    aa("--idx_dir", type=str, default="indexes", help="Directory where to save the index. (Default: indexes)")
aa("--idx_factory", type=str, default="IVF4096,PQ8x8", help="String to create index from index factory. (Default: IVF4096,PQ8x8)")
aa("--quant", type=str, default="L2", help="Quantizer type if IVF (L2, IP, etc.)")
aa("--nprobe", type=int, default=1, help="Number of probes per query if IVF.")
aa("--kneighbors", type=int, default=100, help="Number of nearest neighbors to return")
group = parser.add_argument_group('Optimization parameters')
aa("--iterations", type=int, default=10, help="Number of iterations for image optimization. (Default: 10)")
aa("--optimizer", type=str, default="Adam,lr=1e-0", help="Optimizer to use. (Default: Adam)")
aa("--scheduler", type=str, default=None, help="Scheduler to use. (Default: None)")
aa("--target", type=str, default="pq_recons", help="Target to use. (Default: pq_recons)")
    aa("--loss_f", type=str, default="cossim", help="Loss to use for the features. Choose among mse, cossim (Default: cossim)")
aa("--lambda_f", type=float, default=1.0, help="Weight of the feature loss. (Default: 1.0)")
    aa("--lambda_i", type=float, default=1e-2, help="Weight of the image loss. (Default: 1e-2)")
group = parser.add_argument_group('Distortion & Attenuation parameters')
aa("--use_attenuation", type=utils.bool_inst, default=True, help="Use heatmap attenuation")
aa("--scaling", type=float, default=3.0, help="Scaling factor for the heatmap attenuation")
aa("--scale_channels", type=utils.bool_inst, default=True, help="Scale the RGB channels of the heatmap attenuation")
aa("--use_tanh", type=utils.bool_inst, default=True, help="Use tanh for the heatmap attenuation")
group = parser.add_argument_group('Evaluation parameters')
aa("--use_attacks_2", type=utils.bool_inst, default=False, help="Use attacks_2 for augmentation evaluation. (Default: False)")
aa("--eval_retrieval", type=utils.bool_inst, default=True, help="Evaluate retrieval. (Default: True)")
aa("--eval_icd", type=utils.bool_inst, default=True, help="Evaluate icd. (Default: True)")
group = parser.add_argument_group('Misc parameters')
aa("--active", type=utils.bool_inst, default=True, help="Activate images")
aa("--save_imgs", type=utils.bool_inst, default=True, help="Save images")
    aa("--log_freq", type=int, default=11, help="Log every n iterations. (Default: 11)")
aa("--debug", type=utils.bool_inst, default=False, help="Debug mode. (Default: False)")
return parser
@torch.no_grad()
def eval_retrieval(img_loader, image_indices, transform, model, index, kneighbors, use_attacks_2=False):
"""
Evaluate retrieval on the activated images.
Args:
img_loader (torch.utils.data.DataLoader): Data loader for the images.
image_indices (list): List of ground-truth image indices.
transform (torchvision.transforms): Transform to apply to the images.
model (torch.nn.Module): Model to use for feature extraction.
index (faiss.Index): Index to use for retrieval.
kneighbors (int): Number of nearest neighbors to return.
use_attacks_2 (bool): Use attacks_2 for augmentation evaluation. (Default: False)
Returns:
df (pandas.DataFrame): Dataframe with the results.
"""
logs = []
attacks = utils_img.attacks_2 if use_attacks_2 else utils_img.attacks
base_count = 0
for ii, imgs in enumerate(tqdm.tqdm(img_loader)):
# create attacks for each image of the batch
attacked_imgs = [utils_img.generate_attacks(pil_img, attacks) for pil_img in imgs] # batchsize nattacks
# create batches for each attack
batch_attacked_imgs = [[] for _ in range(len(attacks))] # nattacks 0
for jj, attacked_img_jj in enumerate(attacked_imgs):
for kk in range(len(attacks)): # nattacks 0 -> nattacks batchsize
img_jj_attack_kk = transform(attacked_img_jj[kk]).unsqueeze(0).to(device)
batch_attacked_imgs[kk].append(img_jj_attack_kk)
batch_attacked_imgs = [torch.cat(batch_attacked_img, dim=0) for batch_attacked_img in batch_attacked_imgs] # nattacks batchsize
# iterate over attacks
for kk in range(len(attacks)):
# create attack param
attack = attacks[kk].copy()
attack_name = attack.pop('attack')
param_names = ['attack_param' for _ in range(len(attack.keys()))]
attack_params = dict(zip(param_names,list(attack.values())))
# extract features
fts = model(batch_attacked_imgs[kk])
fts = fts.detach().cpu().numpy()
# retrieve nearest neighbors
retrieved_Ds, retrieved_Is = index.search(fts, k=kneighbors)
# iterate over images of the batch
for jj in range(len(batch_attacked_imgs[kk])):
image_index = image_indices[base_count+jj]
retrieved_D, retrieved_I = retrieved_Ds[jj], retrieved_Is[jj]
rank = [kk for kk in range(len(retrieved_I)) if retrieved_I[kk]==image_index]
rank = rank[0] if rank else len(retrieved_I)
logs.append({
'batch': ii,
'image_index': image_index,
"attack": attack_name,
**attack_params,
'retrieved_distances': retrieved_D,
'retrieved_indices': retrieved_I,
'rank': rank,
'r@1': 1 if rank<1 else 0,
'r@10': 1 if rank<10 else 0,
'r@100': 1 if rank<100 else 0,
'ap': 1/(rank+1),
"kw": "evaluation",
})
# update count of images
base_count += len(imgs)
df = pd.DataFrame(logs).drop(columns='kw')
return df
@torch.no_grad()
def eval_icd(img_loader, img_nonmatch_loader, image_indices, transform, model, index, kneighbors, seed=0):
"""
Evaluate icd on the activated images.
Args:
img_loader (torch.utils.data.DataLoader): Data loader for the images.
img_nonmatch_loader (torch.utils.data.DataLoader): Data loader for the non-matching images.
image_indices (list): List of ground-truth image indices.
transform (torchvision.transforms): Transform to apply to the images.
model (torch.nn.Module): Model to use for feature extraction.
index (faiss.Index): Index to use for retrieval.
kneighbors (int): Number of nearest neighbors to return.
seed (int): Seed for the random number generator. (Default: 0)
Returns:
df (pandas.DataFrame): Dataframe with the results.
"""
# stats on matching images
rng = np.random.RandomState(seed)
logs = []
ct_match = 0 # count of matching images
for ii, imgs in enumerate(tqdm.tqdm(img_loader)):
# create attack for each image of the batch
attacked_imgs = []
attack_names = []
for jj, pil_img in enumerate(imgs):
attacked_img, aug_params = augment_queries.augment_img_wrapper(pil_img, rng, return_params=True)
            attack_name = "[" + ", ".join([str(ftr) for ftr in aug_params]) + "]"
attacked_img = transform(attacked_img).unsqueeze(0).to(device)
attack_names.append(attack_name)
attacked_imgs.append(attacked_img)
attacked_imgs = torch.cat(attacked_imgs, dim=0)
# extract features
fts = model(attacked_imgs)
fts = fts.detach().cpu().numpy()
# nearest neighbors search
retrieved_Ds, retrieved_Is = index.search(fts, k=kneighbors)
# iterate over images of the batch
for jj in range(len(imgs)):
retrieved_D, retrieved_I = retrieved_Ds[jj], retrieved_Is[jj]
image_index = image_indices[ct_match + jj]
logs.append({
'batch': ii,
'image_index': image_index,
'attack': attack_names[jj],
'retrieved_distances': retrieved_D,
'retrieved_ids': retrieved_I,
"kw": "icd_evaluation",
})
# update count of matching images
ct_match += len(imgs)
# stats non matching images
for ii, imgs in enumerate(tqdm.tqdm(img_nonmatch_loader)):
# create attack for each image of the batch
attacked_imgs = []
attack_names = []
for jj, pil_img in enumerate(imgs):
attacked_img, aug_params = augment_queries.augment_img_wrapper(pil_img, rng, return_params=True)
            attack_name = "[" + ", ".join([str(ftr) for ftr in aug_params]) + "]"
attacked_img = transform(attacked_img).unsqueeze(0).to(device)
attack_names.append(attack_name)
attacked_imgs.append(attacked_img)
attacked_imgs = torch.cat(attacked_imgs, dim=0)
# extract features
fts = model(attacked_imgs)
fts = fts.detach().cpu().numpy()
# nearest neighbors search
retrieved_Ds, retrieved_Is = index.search(fts, k=kneighbors)
# iterate over images of the batch
for jj in range(len(imgs)):
retrieved_D, retrieved_I = retrieved_Ds[jj], retrieved_Is[jj]
logs.append({
'batch': ii,
'image_index': -1,
'attack': attack_names[jj],
'retrieved_distances': retrieved_D,
'retrieved_ids': retrieved_I,
"kw": "icd_evaluation",
})
icd_df = pd.DataFrame(logs).drop(columns='kw')
return icd_df
def main(params):
    # Set seeds for reproducibility
torch.manual_seed(params.seed)
torch.cuda.manual_seed_all(params.seed)
np.random.seed(params.seed)
random.seed(params.seed)
# Create the directories
os.makedirs(params.idx_dir, exist_ok=True)
os.makedirs(params.output_dir, exist_ok=True)
imgs_dir = os.path.join(params.output_dir, 'imgs')
os.makedirs(imgs_dir, exist_ok=True)
print(f'>>> Starting. \n \t Index will be saved in {params.idx_dir} - images will be saved in {imgs_dir} - evaluation logs in {params.output_dir}')
# Build Index - see https://github.com/facebookresearch/faiss/wiki/Faiss-indexes
print(f'>>> Building Index')
idx_path = os.path.join(params.idx_dir, f'idx={params.idx_factory}_quant={params.quant}.index')
if os.path.exists(idx_path):
print(f'>>> Loading Index from {idx_path}')
index = faiss.read_index(idx_path)
else:
print(f'>>> Index not found. Building Index with fts from {params.fts_training_path}...')
index = utils.build_index_factory(params.idx_factory, params.quant, params.fts_training_path, idx_path)
index.nprobe = params.nprobe
if 'IVF' in params.idx_factory: # optionally get the centroids
ivf = faiss.extract_index_ivf(index)
ivf_centroids = ivf.quantizer.reconstruct_n(0, ivf.nlist)
else:
ivf_centroids = None
# Adding reference images to the index
print(f'>>> Adding reference images to the index from {params.fts_reference_path}...')
fts = torch.load(params.fts_reference_path)
index.add(fts.detach().cpu().numpy())
n_index_ref = index.ntotal
if 'IVF' in params.idx_factory:
ivf.make_direct_map()
# Build the feature extractor model
print(f'>>> Building backbone from {params.model_path}...')
model = utils.build_backbone(path=params.model_path, name=params.model_name)
model.eval()
model.to(device)
for param in model.parameters():
param.requires_grad = False
# loss for feature
cossim = nn.CosineSimilarity(dim=-1)
pdist = nn.PairwiseDistance(p=2)
def loss_f(ft, target):
if params.loss_f == 'cossim':
dists = -cossim(ft, target)
else:
dists = pdist(ft, target)**2
return torch.mean(dists)
# loss for image
mse = nn.MSELoss()
def loss_i(imgs, imgs_ori):
li = 0
bb = len(imgs)
for ii in range(bb): # imgs do not have same size so we cannot use batch mse
li += mse(imgs[ii], imgs_ori[ii])
return li/bb
# build perceptual attenuation
attenuation = attenuations.JND(preprocess = utils_img.UNNORMALIZE_IMAGENET).to(device)
attenuation.requires_grad = False
# Load images to activate
print(f'>>> Loading images from {params.data_dir}...')
transform = transforms.Compose([
transforms.ToTensor(),
utils_img.NORMALIZE_IMAGENET,
])
transform_with_resize = transforms.Compose([
transforms.ToTensor(),
utils_img.NORMALIZE_IMAGENET,
transforms.Resize((params.resize_size, params.resize_size)),
])
data_loader = utils.get_dataloader(params.data_dir, transform, params.batch_size)
print(f'>>> Activating images...')
all_imgs = []
for it, imgs in enumerate(tqdm.tqdm(data_loader)):
if params.debug and it > 5:
break
imgs = [img.to(device) for img in imgs]
# Add to index
resized_imgs = [functional.resize(img, (params.resize_size, params.resize_size)) for img in imgs]
batch_imgs = torch.stack([img for img in resized_imgs])
fts = model(batch_imgs)
index.add(fts.detach().cpu().numpy())
if 'IVF' in params.idx_factory:
ivf.make_direct_map() # update the direct map if needed
# Activate
if params.active:
imgs = activate_images(imgs, fts, model, index, ivf_centroids, attenuation, loss_f, loss_i, params)
# Save images
for ii, img in enumerate(imgs):
img = torch.clamp(utils_img.UNNORMALIZE_IMAGENET(img), 0, 1)
img = torch.round(255 * img)/255
img = img.detach().cpu()
if params.save_imgs:
save_image(img, os.path.join(imgs_dir, f'{it*params.batch_size + ii:05d}.png'))
else:
all_imgs.append(transforms.ToPILImage()(img))
if params.save_imgs:
# create loader from saved images
img_loader = utils.get_dataloader(imgs_dir, transform=None, batch_size=params.batch_size_eval)
else:
# list of images to list of batches
img_loader = [all_imgs[ii:ii + params.batch_size_eval] for ii in range(0, len(all_imgs), params.batch_size_eval)]
if params.eval_retrieval:
print(f'>>> Evaluating nearest neighbors search...')
image_indices = range(n_index_ref, index.ntotal)
df = eval_retrieval(img_loader, image_indices, transform_with_resize, model, index, params.kneighbors, params.use_attacks_2)
df.to_csv(os.path.join(params.output_dir, 'retr_df.csv'), index=False)
df.fillna(0, inplace=True)
df_mean = df.groupby(['attack', 'attack_param'], as_index=False).mean()
print(f'\n{df_mean}')
if params.eval_icd:
print(f'>>> Evaluating copy detection on query set...')
image_indices = range(n_index_ref, index.ntotal)
        img_nonmatch_loader = utils.get_dataloader(params.query_nonmatch_dir, transform=None, batch_size=params.batch_size_eval)
        icd_df = eval_icd(img_loader, img_nonmatch_loader, image_indices, transform_with_resize, model, index, params.kneighbors)
icd_df_path = os.path.join(params.output_dir,'icd_df.csv')
icd_df.to_csv(icd_df_path, index=False)
print(f'\n{icd_df}')
if __name__ == '__main__':
# generate parser / parse parameters
parser = get_parser()
params = parser.parse_args()
# run experiment
main(params)
| active_indexing-main | activeindex/main.py |
Active-3D-Vision-and-Touch-main | __init__.py |
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
setuptools.setup(
name="pterotactyl",
version="0.1.0",
author="Facebook AI Research",
description="",
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence :: Active Sensing",
],
python_requires=">=3.6",
) | Active-3D-Vision-and-Touch-main | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| Active-3D-Vision-and-Touch-main | pterotactyl/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| Active-3D-Vision-and-Touch-main | pterotactyl/objects/__init__.py |
Active-3D-Vision-and-Touch-main | pterotactyl/objects/hand/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from pterotactyl.utility import utils
BASE_MESH_SIZE = 1824
BASE_CHART_SIZE = 25
# replay buffer used for learning RL models over the environment
class ReplayMemory:
def __init__(self, args):
self.args = args
# basic info which might be used by a learning method
        # _n denotes observations occurring after the action is performed
self.mask = torch.zeros((self.args.mem_capacity, self.args.num_actions))
self.mask_n = torch.zeros((self.args.mem_capacity, self.args.num_actions))
self.actions = torch.zeros((self.args.mem_capacity))
self.rewards = torch.zeros(self.args.mem_capacity)
self.score = torch.zeros(self.args.mem_capacity)
self.score_n = torch.zeros(self.args.mem_capacity)
self.first_score = torch.zeros(self.args.mem_capacity)
if self.args.use_recon:
num_fingers = 1 if self.args.finger else 4
mesh_shape = BASE_MESH_SIZE + (
BASE_CHART_SIZE * self.args.num_grasps * num_fingers
)
self.mesh = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
self.mesh_n = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
if self.args.use_latent:
latent_size = utils.load_model_config(self.args.auto_location)[
0
].encoding_size
self.latent = torch.zeros((self.args.mem_capacity, latent_size))
self.latent_n = torch.zeros((self.args.mem_capacity, latent_size))
self.first_latent = torch.zeros((self.args.mem_capacity, latent_size))
self.position = 0
self.count_seen = 0
# add a set of transitions to the replay buffer
def push(self, action, observation, next_observation, reward):
for i in range(len(action)):
self.actions[self.position] = action[i]
self.rewards[self.position] = reward[i]
self.score[self.position] = observation["score"][i]
self.score_n[self.position] = next_observation["score"][i]
self.first_score[self.position] = observation["first_score"][i]
self.mask[self.position] = observation["mask"][i]
self.mask_n[self.position] = next_observation["mask"][i]
if self.args.use_recon:
self.mesh[self.position] = observation["mesh"][i]
self.mesh_n[self.position] = next_observation["mesh"][i]
if self.args.use_latent:
self.latent[self.position] = observation["latent"][i]
self.latent_n[self.position] = next_observation["latent"][i]
self.first_latent[self.position] = observation["first_latent"][i]
self.count_seen += 1
self.position = (self.position + 1) % self.args.mem_capacity
# sample a set of transitions from the replay buffer
def sample(self):
if (
self.count_seen < self.args.burn_in
or self.count_seen < self.args.train_batch_size
):
return None
indices = np.random.choice(
min(self.count_seen, self.args.mem_capacity), self.args.train_batch_size
)
data = {
"mask": self.mask[indices],
"mask_n": self.mask_n[indices],
"actions": self.actions[indices],
"rewards": self.rewards[indices],
"score": self.score[indices],
"score_n": self.score_n[indices],
"first_score": self.first_score[indices],
}
if self.args.use_recon:
data["mesh"] = self.mesh[indices]
data["mesh_n"] = self.mesh_n[indices]
if self.args.use_latent:
data["latent"] = self.latent[indices]
data["latent_n"] = self.latent_n[indices]
data["first_latent"] = self.first_latent[indices]
return data
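    # Note: sample() returns None until `burn_in` (and at least one full training batch of)
    # transitions have been pushed, so the training code is expected to skip its update
    # step while the buffer is still warming up.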
# save the replay buffer to disk
def save(self, directory):
data = {
"mask": self.mask,
"mask_n": self.mask_n,
"actions": self.actions,
"rewards": self.rewards,
"score": self.score,
"first_score": self.first_score,
"position": self.position,
"count_seen": self.count_seen,
}
if self.args.use_recon:
data["mesh"] = self.mesh
data["mesh_n"] = self.mesh_n
if self.args.use_latent:
data["latent"] = self.latent
data["latent_n"] = self.latent_n
data["first_latent"] = self.first_latent
temp_path = directory + "_replay_buffer_temp.pt"
full_path = directory + "_replay_buffer.pt"
torch.save(data, temp_path)
os.rename(temp_path, full_path)
# load the replay buffer from the disk
def load(self, directory):
data = torch.load(directory + "_replay_buffer.pt")
self.mask = data["mask"]
self.mask_n = data["mask_n"]
        self.actions = data["actions"]
self.rewards = data["rewards"]
self.score = data["score"]
self.first_score = data["first_score"]
self.position = data["position"]
self.count_seen = data["count_seen"]
if self.args.use_recon:
self.mesh = data["mesh"]
self.mesh_n = data["mesh_n"]
if self.args.use_latent:
self.latent = data["latent"]
self.latent_n = data["latent_n"]
self.first_latent = data["first_latent"]
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/replay.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import numpy as np
import torch
import torch.utils.data
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
from pterotactyl.reconstruction.touch import model as touch_model
from pterotactyl.reconstruction.vision import model as vision_model
from pterotactyl.reconstruction.autoencoder import model as auto_model
import pterotactyl.objects as objects
from pterotactyl.simulator.scene import sampler
from pterotactyl.simulator.physics import grasping
from pterotactyl import pretrained
class ActiveTouch:
def __init__(self, args):
self.args = args
self.seed(self.args.seed)
self.current_information = {}
self.steps = 0
self.touch_chart_location = os.path.join(
os.path.dirname(objects.__file__), "touch_chart.obj"
)
self.vision_chart_location = os.path.join(
os.path.dirname(objects.__file__), "vision_charts.obj"
)
self.pretrained_recon_models()
self.setup_recon()
self.get_loaders()
self.sampler = sampler.Sampler(
grasping.Agnostic_Grasp, bs=self.args.env_batch_size, vision=False
)
# Fix seeds
def seed(self, seed):
self.seed = seed
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
# get dataloaders
def get_loaders(self):
if not self.args.eval:
self.train_data = data_loaders.mesh_loader_active(
self.args, set_type="RL_train"
)
set_type = "valid"
else:
set_type = "test"
self.valid_data = data_loaders.mesh_loader_active(self.args, set_type=set_type)
def pretrained_recon_models(self):
if self.args.pretrained_recon:
self.args.touch_location = (
os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/"
)
if self.args.use_img:
if self.args.finger:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_p/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_p/"
)
else:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_g/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_g/"
)
else:
if self.args.finger:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_p/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_p/"
)
else:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_g/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_g/"
)
# initialize and load the correct reconstruction models
def setup_recon(self):
self.touch_verts, _ = utils.load_mesh_touch(self.touch_chart_location)
        # load pretrained touch prediction model
touch_args, _ = utils.load_model_config(self.args.touch_location)
weights = self.args.touch_location + '/model'
self.touch_prediction = touch_model.Encoder().cuda()
self.touch_prediction.load_state_dict(torch.load(weights))
self.touch_prediction.eval()
        # load pretrained vision prediction model
vision_args, _ = utils.load_model_config(self.args.vision_location)
weights = self.args.vision_location + '/model'
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
self.deform = vision_model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
).cuda()
self.deform.load_state_dict(torch.load(weights))
self.deform.eval()
        # load pretrained autoencoder model
if self.args.use_latent:
auto_args, _ = utils.load_model_config(self.args.auto_location)
weights = self.args.auto_location + '/model'
self.auto_encoder = auto_model.AutoEncoder(
self.mesh_info, self.initial_mesh, auto_args, only_encode=True
).cuda()
self.auto_encoder.load_state_dict(torch.load(weights), strict=False)
self.auto_encoder.eval()
# reset the environment with new objects
def reset(self, batch):
self.current_data = {}
self.steps = 0
self.current_data["first_score"] = None
self.current_data["batch"] = batch
self.current_data["mask"] = torch.zeros(
[self.args.env_batch_size, self.args.num_actions]
)
self.sampler.load_objects(batch["names"], from_dataset=True)
obs = self.compute_obs()
self.current_data["score"] = obs["score"]
return obs
    # take a step in the environment with the supplied actions
def step(self, actions):
self.update_masks(actions)
obs = self.compute_obs(actions=actions)
reward = self.current_data["score"] - obs["score"]
self.current_data["score"] = obs["score"]
self.steps += 1
done = self.steps == self.args.budget
return obs, reward, done
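    # Note on the reward: `score` is the scaled Chamfer distance of the current
    # reconstruction, so reward = previous_score - new_score is positive whenever
    # the chosen grasp improves the predicted mesh.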
    # compute the best myopic greedy actions and perform them
def best_step(self, greedy_checks=None):
best_actions = [None for _ in range(self.args.env_batch_size)]
best_score = [1000 for _ in range(self.args.env_batch_size)]
if greedy_checks == None or (
greedy_checks is not None and greedy_checks >= self.args.num_actions
):
for i in range(self.args.num_actions):
actions = [i for _ in range(self.args.env_batch_size)]
obs = self.compute_obs(actions)
for e, s in enumerate(obs["score"]):
if s < best_score[e] and self.current_data["mask"][e][i] == 0:
best_actions[e] = actions[e]
best_score[e] = s
else:
possible_actions = [
list(range(self.args.num_actions))
for _ in range(self.args.env_batch_size)
]
for i in range(self.args.env_batch_size):
seen = torch.where(self.current_data["mask"][i] != 0)[0]
actions = list(seen.data.cpu().numpy())
actions.sort()
actions.reverse()
for action in actions:
del possible_actions[i][action]
checks = min(greedy_checks, len(possible_actions[0]))
selected_actions = [
random.sample(possible_actions[i], checks)
for i in range(self.args.env_batch_size)
]
for i in range(checks):
actions = [
selected_actions[j][i] for j in range(self.args.env_batch_size)
]
obs = self.compute_obs(actions)
for e, s in enumerate(obs["score"]):
if s < best_score[e]:
best_actions[e] = actions[e]
best_score[e] = s
actions = np.array(best_actions)
obs, reward, done = self.step(actions)
return actions, obs, reward, done
    # check the result of performing a specific action
def check_step(self, actions):
obs = self.compute_obs(actions=actions)
return obs
    # perform a given action and compute the new state observations
def compute_obs(self, actions=None):
with torch.no_grad():
charts = self.get_inputs(actions)
img = self.current_data["batch"]["img"].cuda()
verts, mask = self.deform(img, charts)
if self.args.use_latent:
latent = self.auto_encoder(verts.detach(), mask)
score = self.get_score(
verts, self.current_data["batch"]["gt_points"].cuda()
)
if self.current_data["first_score"] is None:
self.current_data["first_score"] = score
if self.args.use_latent:
self.current_data["first_latent"] = latent.data.cpu()
mesh = torch.cat((verts, mask), dim=-1).data.cpu()
obs = {
"score": score.data.cpu().clone(),
"first_score": self.current_data["first_score"].clone(),
"mask": self.current_data["mask"].data.cpu().clone(),
"names": self.current_data["batch"]["names"],
"mesh": mesh.data.cpu().clone(),
}
if self.args.use_latent:
obs["first_latent"] = self.current_data["first_latent"]
obs["latent"] = latent.data.cpu()
return obs
# compute the Chamfer distance of object predictions
def get_score(self, verts, gt_points):
loss = utils.chamfer_distance(
verts, self.mesh_info["faces"], gt_points, num=self.args.number_points
)
loss = self.args.loss_coeff * loss
return loss.cpu()
    # perform a given action and convert the resulting signals into the expected input for the reconstructor
def get_inputs(self, actions=None):
num_fingers = 1 if self.args.finger else 4
        # this occurs if a reset is being performed
        # here the input is defined with no touch information
if actions is None:
self.touch_charts = torch.zeros(
(self.args.env_batch_size, num_fingers, self.args.num_grasps, 25, 3)
).cuda()
self.touch_masks = torch.zeros(
(self.args.env_batch_size, num_fingers, self.args.num_grasps, 25, 1)
).cuda()
self.vision_charts = self.initial_mesh.unsqueeze(0).repeat(
self.args.env_batch_size, 1, 1
)
self.vision_masks = 3 * torch.ones(
self.vision_charts.shape[:-1]
).cuda().unsqueeze(-1)
else:
            # perform the action
signals = self.sampler.sample(actions, touch_point_cloud=True)
if self.args.finger:
touch = (
torch.FloatTensor(
signals["touch_signal"].data.numpy().astype(np.uint8)
)[:, 1]
.permute(0, 3, 1, 2)
.cuda()
/ 255.0
)
pos = signals["finger_transfrom_pos"][:, 1].cuda()
rot = signals["finger_transform_rot_M"][:, 1].cuda()
ref_frame = {"pos": pos, "rot": rot}
# convert the touch signals to charts
touch_verts = (
self.touch_verts.unsqueeze(0)
.repeat(self.args.env_batch_size, 1, 1)
.cuda()
)
pred_touch_charts = self.touch_prediction(
touch, ref_frame, touch_verts
).contiguous()
# define the touch charts in the input mesh to the reconstructor
for i in range(self.args.env_batch_size):
if signals["touch_status"][i][1] == "touch":
self.touch_charts[i, 0, self.steps] = pred_touch_charts[i]
self.touch_masks[i, 0, self.steps] = 2
elif signals["touch_status"][i][1] == "no_touch":
self.touch_charts[i, 0, self.steps] = (
pos[i].view(1, 1, 3).repeat(1, 25, 1)
)
self.touch_masks[i, 0, self.steps] = 1
else:
self.touch_charts[i, 0, self.steps] = 0
self.touch_masks[i, 0, self.steps] = 0
else:
touch = (
signals["touch_signal"]
.view(-1, 121, 121, 3)
.permute(0, 3, 1, 2)
.cuda()
/ 255.0
)
pos = signals["finger_transfrom_pos"].view(-1, 3).cuda()
rot = signals["finger_transform_rot_M"].view(-1, 3, 3).cuda()
ref_frame = {"pos": pos, "rot": rot}
# convert the touch signals to charts
touch_verts = (
self.touch_verts.unsqueeze(0)
.repeat(self.args.env_batch_size * 4, 1, 1)
.cuda()
)
pred_touch_charts = self.touch_prediction(
touch, ref_frame, touch_verts
).contiguous()
# define the touch charts in the input mesh to the reconstructor
for i in range(self.args.env_batch_size):
for j in range(4):
if signals["touch_status"][i][j] == "touch":
self.touch_charts[i, j, self.steps] = pred_touch_charts[
i * 4 + j
]
self.touch_masks[i, j, self.steps] = 2
elif signals["touch_status"][i][j] == "no_touch":
self.touch_charts[i, j, self.steps] = (
pos[i * 4 + j].view(1, 1, 3).repeat(1, 25, 1)
)
self.touch_masks[i, j, self.steps] = 1
else:
self.touch_charts[i, j, self.steps] = 0
self.touch_masks[i, j, self.steps] = 0
charts = {
"touch_charts": self.touch_charts.view(
self.args.env_batch_size, num_fingers * 5 * 25, 3
).clone(),
"vision_charts": self.vision_charts.clone(),
"touch_masks": self.touch_masks.view(
self.args.env_batch_size, num_fingers * 5 * 25, 1
).clone(),
"vision_masks": self.vision_masks.clone(),
}
return charts
    # this is performed due to a memory leak in pybullet where loaded meshes are not properly deleted
def reset_pybullet(self):
self.sampler.disconnect()
del self.sampler
self.sampler = sampler.Sampler(
grasping.Agnostic_Grasp, bs=self.args.env_batch_size, vision=True
)
# update the set of action which have been performed
def update_masks(self, actions):
for i in range(actions.shape[0]):
self.current_data["mask"][i, actions[i]] = 1
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/environment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
from pterotactyl.utility import utils
class Latent_Model(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
latent_size = utils.load_model_config(self.args.auto_location)[0].encoding_size
# for embedding previously performed actions
layers = []
layers.append(nn.Sequential(nn.Linear(self.args.num_actions, 200), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(200, 100), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(100, latent_size)))
self.action_model = nn.Sequential(*layers)
        # MLP that takes as input the action embedding, a latent embedding of the first
        # prediction, and a latent embedding of the current prediction, and predicts a value for every action
hidden_sizes = (
[latent_size * 3]
+ [args.hidden_dim for _ in range(args.layers - 1)]
+ [self.args.num_actions]
)
layers = []
for i in range(args.layers):
if i < args.layers - 1:
layers.append(
nn.Sequential(
nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]), nn.ReLU()
)
)
else:
layers.append(
nn.Sequential(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))
)
self.model = nn.Sequential(*layers)
self.args = args
def forward(self, obs):
action_input = self.action_model(obs["mask"].float().cuda())
shape_input_1 = obs["latent"].float().cuda()
shape_input_2 = obs["first_latent"].float().cuda()
full_input = torch.cat((action_input, shape_input_1, shape_input_2), dim=-1)
if self.args.normalize:
value = torch.sigmoid(self.model(full_input)) * 2 - 1
elif self.args.use_img:
value = torch.sigmoid(self.model(full_input)) * 6 - 3
else:
value = torch.sigmoid(self.model(full_input)) * 200 - 100
return value
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/supervised/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import argparse
from collections import namedtuple
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.policies.supervised import model as learning_model
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
self.results_dir = os.path.join("results", args.exp_type, args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
if not self.args.eval:
utils.save_config(self.checkpoint_dir, args)
def __call__(self):
# setup the environment, policy and data
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.even_sampler(self.args)
train_loaders, valid_loaders = self.get_loaders()
self.step = 0
self.models = [
learning_model.Latent_Model(self.args).cuda()
for i in range(self.args.budget)
]
# logging information
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
# evaluate the policy
if self.args.eval:
with torch.no_grad():
self.load(train=False)
self.step = self.args.budget - 1
self.validate(valid_loaders, writer)
return
else:
for i in range(self.args.budget):
params = list(self.models[i].parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
self.load(train=True)
for model in self.models:
model.eval()
self.epoch = 0
self.best_loss = 10000
self.last_improvement = 0
for j in range(self.args.epoch):
self.train(train_loaders, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
if self.check_values():
break
self.epoch += 1
self.step += 1
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=True,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
def train(self, dataloader, writer):
total_loss = 0
iterations = 0.0
self.models[self.step].train()
for v, batch in enumerate(
tqdm(dataloader, total=min(self.args.train_steps, len(dataloader)))
):
if v >= self.args.train_steps:
break
try:
obs = self.env.reset(batch)
            except Exception:  # skip batches the simulator fails to reset
continue
# move to the correct step
with torch.no_grad():
cur_actions = []
for i in range(self.step):
values = self.models[i](obs)
for acts in cur_actions:
for e, act in enumerate(acts):
values[e][act] = 1e10
actions = torch.argmin(values, dim=1)
next_obs, reward, all_done = self.env.step(actions)
obs = next_obs
cur_actions.append(actions)
# predict action values
all_pred_values = self.models[self.step](obs)
pred_values = []
            # sample args.training_actions random actions per object and compute their value
            random_actions = np.random.randint(
                self.args.num_actions,
                size=self.args.env_batch_size * self.args.training_actions,
            ).reshape(self.args.training_actions, self.args.env_batch_size)
target = []
for actions in random_actions:
temp_obs = self.env.check_step(actions)
if self.args.normalize:
score = (temp_obs["first_score"] - temp_obs["score"]) / temp_obs[
"first_score"
]
else:
score = temp_obs["first_score"] - temp_obs["score"]
cur_pred_values = []
for j, a in enumerate(actions):
cur_pred_values.append(all_pred_values[j, a])
pred_values.append(torch.stack(cur_pred_values))
target.append(score)
target = torch.stack(target).cuda()
pred_values = torch.stack(pred_values)
loss = ((target - pred_values) ** 2).mean()
            # backprop
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
# log
            message = f"Train || step {self.step + 1} || Epoch: {self.epoch}, loss: {loss.item():.3f}, b_ptp: {self.best_loss:.3f}"
tqdm.write(message)
total_loss += loss.item()
iterations += 1.0
self.train_loss = total_loss / iterations
writer.add_scalars(
f"train_loss_{self.step}",
{self.args.exp_id: total_loss / iterations},
self.epoch,
)
    # perform the validation
def validate(self, dataloader, writer):
observations = []
scores = []
actions = []
names = []
self.models[self.step].eval()
valid_length = int(len(dataloader) * 0.2)
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
try:
obs = self.env.reset(batch)
            except Exception:  # skip batches the simulator fails to reset
continue
self.policy.reset()
cur_scores = [obs["score"]]
cur_actions = []
for i in range(self.step + 1):
action_values = self.models[i](obs)
for acts in cur_actions:
for e, act in enumerate(acts):
action_values[e][act] = 1e10
action = torch.argmin(action_values, dim=1)
next_obs, _, _ = self.env.step(action)
# record observation
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(action.data.cpu())
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5 and self.args.eval:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
self.current_loss = current_loss
print("*" * 30)
        message = f"Total Valid || step {self.step + 1} || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if self.args.visualize and self.args.eval:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
if not self.args.eval:
writer.add_scalars(
f"valid_loss_{self.step}", {self.args.exp_id: current_loss}, self.epoch
)
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(f"Saving with {improvement:.3f} improvement on Validation Set ")
self.best_loss = self.current_loss
self.last_improvement = 0
self.save()
return False
else:
self.last_improvement += 1
if self.last_improvement >= self.args.patience:
                print(f"Over {self.args.patience} steps since last improvement")
print("Moving to next step or exiting")
return True
def load(self, train=False):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/v_t_p"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/v_t_g"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/t_p"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/t_g"
)
config_location = f"{location}/config.json"
with open(config_location) as json_file:
data = json.load(json_file)
data["auto_location"] = self.args.auto_location
data["eval"] = True
data["visualize"] = self.args.visualize
self.args = namedtuple("ObjectName", data.keys())(*data.values())
self.models = [
learning_model.Latent_Model(self.args).cuda()
for i in range(self.args.budget)
]
for i in range(self.args.budget):
self.models[i].load_state_dict(torch.load(location + f"/model_{i}"))
else:
if train:
for i in range(self.step):
self.models[i].load_state_dict(
torch.load(self.checkpoint_dir + f"/model_{i}")
)
else:
for i in range(self.args.budget):
self.models[i].load_state_dict(
torch.load(self.checkpoint_dir + f"/model_{i}")
)
def save(self):
torch.save(
self.models[self.step].state_dict(),
self.checkpoint_dir + f"/model_{self.step}",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
        help="the location of the vision part prediction.",
)
parser.add_argument(
"--auto_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/auto/t_p/",
help="the location of the autoencoder part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--epoch", type=int, default=3000, help="number of epochs per step"
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--num_actions", type=int, default=50, help="number of action options"
)
parser.add_argument(
"--eval", action="store_true", default=False, help="for evaluating on test set"
)
    parser.add_argument(
        "--budget", type=int, default=5, help="number of grasps to perform"
    )
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment type."
)
parser.add_argument(
"--layers", type=int, default=4, help="Number of layers in the q network"
)
parser.add_argument(
"--patience",
type=int,
default=25,
help="number of epochs without progress before stopping",
)
parser.add_argument(
"--training_actions",
type=int,
default=5,
help="number of action values learned for each object in each iteration",
)
parser.add_argument(
"--hidden_dim",
type=int,
default=200,
help="hidden dimension size in layers in the q network",
)
parser.add_argument(
"--train_steps",
type=int,
default=200,
help="number of training steps per epoch",
)
    parser.add_argument(
        "--normalize", type=int, default=0, help="whether to normalize reward targets by the initial score"
    )
parser.add_argument(
"--lr", type=float, default=0.001, help="Initial learning rate."
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
args.use_recon = False
args.use_latent = True
trainer = Engine(args)
trainer()
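# Illustrative invocations (flag names come from the parser above; the module path,
# experiment ids and values are assumptions, not part of the original):
#   python -m pterotactyl.policies.supervised.train --exp_type supervised --exp_id run0 --budget 5
#   python -m pterotactyl.policies.supervised.train --eval --pretrained --visualize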
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/supervised/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import torch
import torch.nn as nn
import torch.optim as optim
from pterotactyl.policies.DDQN import model
from pterotactyl.policies.baselines import baselines
# DDQN training module
class DDQN(nn.Module):
def __init__(self, args, adj_info, replay):
super().__init__()
self.args = args
self.model = self.get_model(adj_info)
self.replay = replay
self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr)
self.args = args
self.random_sampler = baselines.random_sampler(self.args)
    # set the value of already performed actions so they are never selected
def penalise_actions(self, values, obs):
values[obs["mask"] > 0] = -1e10
return values
# select the model type required
def get_model(self, adj):
if self.args.pretrained:
if self.args.use_latent:
if self.args.use_img:
if self.args.finger:
self.args.hidden_dim = 300
self.args.layers = 5
else:
self.args.hidden_dim = 300
self.args.layers = 5
else:
if self.args.finger:
self.args.hidden_dim = 300
self.args.layers = 5
else:
self.args.hidden_dim = 300
self.args.layers = 2
else:
if self.args.use_img:
if self.args.finger:
self.args.hidden_dim = 100
self.args.layers = 5
else:
self.args.hidden_dim = 100
self.args.layers = 5
else:
if self.args.finger:
self.args.hidden_dim = 100
self.args.layers = 5
else:
self.args.hidden_dim = 100
self.args.layers = 2
if self.args.use_latent:
return model.Latent_Model(self.args).cuda()
elif self.args.use_recon:
return model.Graph_Model(self.args, adj).cuda()
else:
print("No Model type selected")
exit()
# decrease the epsilon value
def update_epsilon(self, epsilon, args):
return max(args.epsilon_end, epsilon * args.epsilon_decay)
# add the observed transition to the replay buffer
def add_experience(self, action, observation, next_observation, reward):
self.replay.push(action, observation, next_observation, reward)
# update the parameters of the model using DDQN update rule
def update_parameters(self, target_net):
self.model.train()
batch = self.replay.sample()
if batch is None:
return None
# get observations
not_done_mask = batch["mask"].cuda().sum(dim=1) < self.args.budget - 1
actions = batch["actions"].cuda()
rewards = batch["rewards"].cuda()
cur_score = batch["score"].cuda()
first_score = batch["first_score"].cuda()
# normalize if needed
if self.args.normalization == "first":
rewards = rewards / first_score
elif self.args.normalization == "current":
rewards = rewards / cur_score
# Standard DDQN update rule
all_q_values_cur = self.forward(batch, penalize=False)
q_values = all_q_values_cur.gather(1, actions.unsqueeze(1).long()).squeeze()
with torch.no_grad():
best_next_action = self.forward(batch, next=True).detach().max(1)[1]
target_values = target_net.forward(
batch, next=True, penalize=False
).detach()
all_q_values_next = torch.zeros((q_values.shape[0])).cuda()
for i in range(q_values.shape[0]):
if not_done_mask[i]:
all_q_values_next[i] = target_values[i][best_next_action[i]]
target_values = (self.args.gamma * all_q_values_next) + rewards
loss = ((q_values - target_values) ** 2).mean()
# backprop
self.optimizer.zero_grad()
loss.backward()
for param in self.parameters():
if param.grad is not None:
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
return loss.item()
def forward(self, obs, next=False, penalize=True):
value = self.model(obs, next=next)
if penalize:
value = self.penalise_actions(value, obs)
return value
def get_action(self, obs, eps_threshold, give_random=False):
sample = random.random()
if sample < eps_threshold or give_random:
return self.random_sampler.get_action(obs["mask"])
else:
with torch.no_grad():
self.model.eval()
q_values = self(obs)
actions = torch.argmax(q_values, dim=1).data.cpu().numpy()
return actions
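# Recap of the update performed in update_parameters above (for reference only):
#   a* = argmax_a Q_online(s', a)        action selected by the online network
#   y  = r + gamma * Q_target(s', a*)    bootstrap term dropped on terminal steps
# The online network minimises the mean squared error between Q_online(s, a) and y,
# and gradients are clamped to [-1, 1] before the optimizer step.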
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/DDQN/ddqn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import numpy as np
from pterotactyl.utility import utils
# DDQN Q network which makes use of a pretrained latent space of predicted objects
class Latent_Model(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
latent_size = utils.load_model_config(self.args.auto_location)[0].encoding_size
# for embedding previously performed actions
layers = []
layers.append(nn.Sequential(nn.Linear(self.args.num_actions, 200), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(200, 100), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(100, latent_size)))
self.action_model = nn.Sequential(*layers)
        # MLP that takes as input the embedding of performed actions, a latent embedding
        # of the first prediction, and the current prediction, and predicts a value for every action
hidden_sizes = (
[latent_size * 3]
+ [args.hidden_dim for _ in range(args.layers - 1)]
+ [self.args.num_actions]
)
layers = []
for i in range(args.layers):
if i < args.layers - 1:
layers.append(
nn.Sequential(
nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]), nn.ReLU()
)
)
else:
layers.append(
nn.Sequential(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))
)
self.model = nn.Sequential(*layers)
self.args = args
def forward(self, obs, next=False):
if next:
action_input = self.action_model(obs["mask_n"].float().cuda())
shape_input_1 = obs["latent_n"].float().cuda()
else:
action_input = self.action_model(obs["mask"].float().cuda())
shape_input_1 = obs["latent"].float().cuda()
shape_input_2 = obs["first_latent"].float().cuda()
full_input = torch.cat((action_input, shape_input_1, shape_input_2), dim=-1)
value = self.model(full_input)
return value
# DDQN Q network which makes use of full mesh prediction
class Graph_Model(nn.Module):
def __init__(self, args, adj):
super().__init__()
self.adj = adj["adj"].data.cpu().cuda()
self.args = args
self.num_layers = args.layers
input_size = 100
# for embedding previously performed actions
layers = []
layers.append(nn.Sequential(nn.Linear(50, 200), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(200, 100), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(100, input_size)))
self.action_model = nn.Sequential(*layers)
# embedding of vertex positions and masks
self.positional_embedding = Positional_Encoder(input_size)
self.mask_embedding = Mask_Encoder(input_size)
# GCN for predicting actions values from input mesh
hidden_sizes = (
[input_size * 3]
+ [args.hidden_dim for _ in range(args.layers - 1)]
+ [self.args.num_actions]
)
layers = []
for i in range(args.layers):
layers.append(
GCN_layer(
hidden_sizes[i],
hidden_sizes[i + 1],
cut=self.args.cut,
do_cut=(i != self.num_layers - 1),
)
)
self.layers = nn.ModuleList(layers)
def forward(self, obs, next=False):
if next:
action_embedding = self.action_model(obs["mask_n"].float().cuda())
mesh = obs["mesh_n"][:, :, :3].float().cuda()
mask = obs["mesh_n"][:, :, 3:].float().cuda()
else:
action_embedding = self.action_model(obs["mask"].float().cuda())
mesh = obs["mesh"][:, :, :3].float().cuda()
mask = obs["mesh"][:, :, 3:].float().cuda()
positional_embedding = self.positional_embedding(mesh)
mask_embedding = self.mask_embedding(mask)
action_embedding = action_embedding.unsqueeze(1).repeat(1, mesh.shape[1], 1)
vertex_features = torch.cat(
(action_embedding, positional_embedding, mask_embedding), dim=-1
)
# iterate through GCN layers
x = self.layers[0](vertex_features, self.adj, F.relu)
for i in range(1, self.num_layers):
x = self.layers[i](
x, self.adj, F.relu if (i != self.num_layers - 1) else lambda x: x
)
value = torch.max(x, dim=1)[0]
return value
# Graph convolutional network layer
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, cut=0.33, do_cut=True):
super(GCN_layer, self).__init__()
self.weight = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
self.cut_size = cut
self.do_cut = do_cut
def reset_parameters(self):
stdv = 6.0 / math.sqrt((self.weight.size(1) + self.weight.size(0)))
stdv *= 0.3
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, features, adj, activation):
features = torch.matmul(features, self.weight)
        # if we only want to share a subset of features with neighbors
if self.do_cut:
length = round(features.shape[-1] * self.cut_size)
output = torch.matmul(adj, features[:, :, :length])
output = torch.cat((output, features[:, :, length:]), dim=-1)
output[:, :, :length] += self.bias[:length]
else:
output = torch.matmul(adj, features)
output = output + self.bias
return activation(output)
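# Worked example of the feature "cut" above (illustrative): with cut=0.33 and 200
# output features, length = round(200 * 0.33) = 66, so only the first 66 features are
# aggregated over neighbours through the adjacency matrix while the remaining 134
# features stay local to each vertex.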
# encode the positional information of vertices using NeRF embeddings
class Positional_Encoder(nn.Module):
def __init__(self, input_size):
super(Positional_Encoder, self).__init__()
layers = []
layers.append(
nn.Linear(63, input_size // 4)
        )  # 10 NeRF frequencies x (sin, cos) x 3 coords + the original 3 positions = 63
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 4, input_size // 2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 2, input_size))
self.model = nn.Sequential(*layers)
    # apply the NeRF embedding to the positional information
def nerf_embedding(self, points):
embeddings = []
for i in range(10):
if i == 0:
embeddings.append(torch.sin(np.pi * points))
embeddings.append(torch.cos(np.pi * points))
else:
embeddings.append(torch.sin(np.pi * 2 * i * points))
embeddings.append(torch.cos(np.pi * 2 * i * points))
embeddings = torch.cat(embeddings, dim=-1)
return embeddings
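    # Dimension check (illustrative): for 3-D vertex positions the loop above yields
    # 10 frequency bands x {sin, cos} x 3 coordinates = 60 features; forward() below
    # concatenates the original 3 coordinates, matching the nn.Linear(63, ...) input layer.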
def forward(self, positions):
shape = positions.shape
positions = positions.contiguous().view(shape[0] * shape[1], -1)
        # combine the NeRF embedding with the original positions
positions = torch.cat((self.nerf_embedding((positions)), positions), dim=-1)
        embedding = self.model(positions).view(shape[0], shape[1], -1)
        return embedding
# embedding network for vertex masks
class Mask_Encoder(nn.Module):
def __init__(self, input_size):
super(Mask_Encoder, self).__init__()
layers_mask = []
layers_mask.append(nn.Embedding(4, input_size))
self.model = nn.Sequential(*layers_mask)
def forward(self, mask):
shape = mask.shape
mask = mask.contiguous().view(-1, 1)
        embedding_mask = self.model(mask.long()).view(shape[0], shape[1], -1)
        return embedding_mask
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/DDQN/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import torch
import argparse
from submitit.helpers import Checkpointable
from tqdm import tqdm
from pterotactyl.policies.DDQN import ddqn
from pterotactyl.policies import environment
from pterotactyl.policies import replay
from pterotactyl.utility import utils
from pterotactyl import pretrained
# module for training the DDQN models
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
self.steps = 0
self.episode = 0
self.epoch = 0
self.cur_loss = 10000
self.best_loss = 10000
self.epsilon = self.args.epsilon_start
self.results_dir = os.path.join("results", args.exp_type, args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", self.args.exp_type, self.args.exp_id
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
utils.save_config(self.checkpoint_dir, args)
def __call__(self):
# initialize the learning environment
self.env = environment.ActiveTouch(self.args)
self.replay_memory = replay.ReplayMemory(self.args)
self.policy = ddqn.DDQN(self.args, self.env.mesh_info, self.replay_memory)
self.target_net = ddqn.DDQN(self.args, self.env.mesh_info, None)
self.target_net.load_state_dict(self.policy.state_dict())
self.target_net.eval()
self.writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
self.window_size = 1000
self.ave_reward = torch.zeros((self.window_size)).cuda()
self.ave_recon = torch.zeros((self.window_size)).cuda()
train_loader, valid_loaders = self.get_loaders()
if self.args.eval:
self.load(best=True)
self.validate(valid_loaders)
return
self.resume()
# training loop
for epoch in range(self.epoch, self.args.epochs):
self.train(train_loader)
self.env.reset_pybullet()
if self.steps >= self.args.burn_in:
with torch.no_grad():
self.validate(valid_loaders)
self.env.reset_pybullet()
self.check_values_and_save()
self.epoch += 1
# load the environment data into pytorch dataloaders
def get_loaders(self):
if self.args.eval:
train_loader = ""
else:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=True,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
# training iteration
def train(self, dataloader):
for v, batch in enumerate(tqdm(dataloader, total=self.args.train_steps)):
if v > self.args.train_steps - 1:
break
obs = self.env.reset(batch)
all_done = False
total_reward = 0
while not all_done:
# update epsilon
if self.steps >= self.args.burn_in:
self.epsilon = self.policy.update_epsilon(self.epsilon, self.args)
# get action
get_random_action = self.steps < self.args.burn_in
action = self.policy.get_action(
obs, eps_threshold=self.epsilon, give_random=get_random_action
)
# perform action
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
                # save the experience
self.policy.add_experience(action, obs, next_obs, reward)
# update policy
if self.steps >= self.args.burn_in:
self.policy.update_parameters(self.target_net)
# update target network
if (
self.steps % self.args.target_update == 0
and self.steps >= self.args.burn_in
):
                    print("+" * 5 + " updating target " + "+" * 5)
self.target_net.load_state_dict(self.policy.state_dict())
torch.cuda.empty_cache()
obs = next_obs
self.steps += 1
# logs
recon = float((obs["score"] / obs["first_score"]).mean().item())
reward = float(
((obs["first_score"] - obs["score"]) / obs["first_score"]).mean().item()
)
self.ave_reward[self.episode % self.window_size] = reward
self.ave_recon[self.episode % self.window_size] = float(
(obs["score"] / obs["first_score"]).mean().item()
)
ave_reward = self.ave_reward[: self.episode + 1].mean()
ave_recon = self.ave_recon[: self.episode + 1].mean()
message = (
f"T Epoch: {self.epoch} Ep: {self.episode}, recon: {recon:.2f}, "
f"reward: {reward:.2f}, a_recon: {ave_recon:.2f}, a_reward: {ave_reward:.2f}, "
f" eps: {self.epsilon:.3f}, best: {self.best_loss:.3f}"
)
tqdm.write(message)
self.episode += 1
# logs
if self.steps >= self.args.burn_in:
self.writer.add_scalars(
"train_recon_|_", {self.args.exp_id: ave_recon}, self.steps
)
self.writer.add_scalars(
"train_reward_|_", {self.args.exp_id: ave_reward}, self.steps
)
# validation iteration
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
print("*" * 30)
print("Doing Validation")
total = self.args.train_steps if not self.args.eval else None
for v, batch in enumerate(tqdm(dataloader, total=total)):
names += batch["names"]
if v > self.args.valid_steps - 1 and not self.args.eval:
break
obs = self.env.reset(batch)
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# select actions
action = self.policy.get_action(
obs, eps_threshold=-1, give_random=False
)
# perform actions
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || E: {self.epoch}, score: {print_score:.2f}, best score: {self.best_loss:.2f} "
message += f"reward = {print_reward:.2f}"
tqdm.write(message)
if self.args.visualize and v == 5 and self.args.eval:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
actions = torch.cat(actions)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
variation = torch.std(actions, dim=0).mean()
self.current_loss = (scores[:, -1] / scores[:, 0]).mean()
print("*" * 30)
message = f"Total Valid || E: {self.epoch}, score: {self.current_loss:.4f}, best score: {self.best_loss:.4f} "
message += f"reward = {rewards.mean():.2f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if not self.args.eval:
self.writer.add_scalars(
f"Valid_recon_|_", {self.args.exp_id: self.current_loss}, self.steps
)
self.writer.add_scalars(
f"Valid_reward_|_", {self.args.exp_id: rewards.mean()}, self.steps
)
self.writer.add_scalars(
"epsilon_|_", {self.args.exp_id: self.epsilon}, self.steps
)
self.writer.add_scalars(
f"Valid_variation_|_", {self.args.exp_id: variation}, self.steps
)
if self.args.visualize and self.args.eval:
utils.visualize_actions(self.results_dir, actions, self.args)
    # check if the new validation score is better and save a checkpoint
def check_values_and_save(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(
f"Saving with {improvement:.3f} improvement in Chamfer Distance on Validation Set "
)
self.best_loss = self.current_loss
self.save(best=True)
print(f"Saving DQN checkpoint")
self.save(best=False)
print("Saving replay memory.")
self.replay_memory.save(self.checkpoint_dir)
# resume training
def resume(self):
path = self.checkpoint_dir + "/recent"
if os.path.exists(path + "_model"):
print(f"Loading DQN checkpoint")
self.load(best=False)
print("Loading replay memory.")
self.replay_memory.load(path)
# save current state of training
def save(self, best=False):
if best:
path = self.checkpoint_dir + "/best"
else:
path = self.checkpoint_dir + "/recent"
self.replay_memory.save(path)
torch.save(
{
"dqn_weights": self.policy.state_dict(),
"target_weights": self.target_net.state_dict(),
"args": self.args,
"episode": self.episode,
"steps": self.steps,
"ave_reward": self.ave_reward,
"ave_recon": self.ave_recon,
"epsilon": self.epsilon,
"epoch": self.epoch,
},
path + "_model",
)
# load previous state of training
def load(self, best=True):
if self.args.pretrained:
prefix = "l" if self.args.use_latent else "g"
if self.args.use_img:
if self.args.finger:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_v_t_p"
)
else:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_v_t_g"
)
else:
if self.args.finger:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_t_p"
)
else:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_t_g"
)
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
else:
if best:
path = self.checkpoint_dir + "/best_model"
else:
path = self.checkpoint_dir + "/recent_model"
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
self.episode = checkpoint["episode"] + 1
if not self.args.eval:
self.target_net.load_state_dict(checkpoint["target_weights"])
self.steps = checkpoint["steps"]
self.ave_reward = checkpoint["ave_reward"]
self.ave_recon = checkpoint["ave_recon"]
self.epsilon = checkpoint["epsilon"]
self.epoch = checkpoint["epoch"] + 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cut",
type=float,
default=0.33,
help="The shared size of features in the GCN.",
)
parser.add_argument(
"--layers", type=int, default=4, help="Number of layers in the q network"
)
parser.add_argument(
"--hidden_dim",
type=int,
default=200,
help="hidden dimension size in layers in the q network",
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
        help="the location of the vision part prediction.",
)
parser.add_argument(
"--auto_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/auto/t_p/",
help="the location of the autoencoder part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--lr", type=float, default=0.0003, help="Initial learning rate."
)
parser.add_argument(
"--env_batch_size",
type=int,
default=3,
help="Size of the batch of objects sampled from the environment",
)
parser.add_argument(
"--train_batch_size",
type=int,
default=16,
help="Size of the batch of transitions sampled for training the q network.",
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--patience",
type=int,
default=70,
        help="How many epochs without improvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--budget", type=int, default=5)
parser.add_argument(
"--normalization",
type=str,
choices=["first", "current", "none"],
default="first",
help="how to normalize the reward for the q network update ",
)
parser.add_argument(
"--mem_capacity", type=int, default=300, help="the size of the replay buffer"
)
parser.add_argument("--burn_in", type=int, default=20, help="ddqn burn in time")
parser.add_argument(
"--num_actions", type=int, default=50, help=" number of possible actions"
)
parser.add_argument("--gamma", type=float, default=0, help="ddqn gamma value")
parser.add_argument(
"--epsilon_start", type=float, default=1.0, help="ddqn initial epsilon value"
)
parser.add_argument(
"--epsilon_decay", type=float, default=0.9999, help="ddqn epsilon decay value"
)
parser.add_argument(
"--epsilon_end", type=float, default=0.01, help="ddqn minimum epsilon value"
)
parser.add_argument(
"--train_steps",
type=int,
default=20,
help="number of training iterations per epoch",
)
parser.add_argument(
"--valid_steps",
type=int,
default=10,
help="number of validation iterations per epoch",
)
parser.add_argument(
"--target_update",
type=int,
default=3000,
help="frequency of target network updates",
)
parser.add_argument(
"--use_latent",
action="store_true",
default=False,
help="if the latent embedding of objects is to be used",
)
parser.add_argument(
"--use_recon",
action="store_true",
default=False,
help="if the object prediction is to be directly used",
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
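# Illustrative invocations (flag names come from the parser above; the module path and
# values are assumptions): train a latent-space policy, then evaluate the pretrained one:
#   python -m pterotactyl.policies.DDQN.train --use_latent --exp_type ddqn --exp_id run0
#   python -m pterotactyl.policies.DDQN.train --use_latent --eval --pretrained --visualize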
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/DDQN/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch
import argparse
import numpy as np
from submitit.helpers import Checkpointable
from pterotactyl.policies import environment
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
def __call__(self):
# setup the environment, policy and data
self.env = environment.ActiveTouch(self.args)
self.spot = 0
self.actions = []
self.latents = []
train_loaders, valid_loaders = self.get_loaders()
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", self.args.exp_type
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
        self.checkpoint = os.path.join(self.checkpoint_dir, "actions.npy")
# evaluate the policy
with torch.no_grad():
self.load()
if self.args.eval:
self.validate(valid_loaders)
else:
self.train(train_loaders)
self.save()
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
# compute the lowest error action for the current step
def train(self, dataloader):
# for all training data
training_length = len(dataloader)
random.seed(self.args.seed)
training_instances = random.sample(
range(training_length), int(training_length * 0.4)
)
for v, batch in enumerate(tqdm(dataloader)):
if v < self.spot:
continue
if v not in training_instances:
continue
self.spot = v
obs = self.env.reset(batch)
for i in range(self.args.budget):
# find best action
action, next_obs, reward, all_done = self.env.best_step(
greedy_checks=self.args.greedy_checks
)
# record action, embedding correspondence
                for b in range(self.args.env_batch_size):
                    self.actions.append(action[b])
                    self.latents.append(obs["latent"][b])
obs = next_obs
if v % 3 == 0:
self.save()
    # perform the validation
def validate(self, dataloader):
self.latents = torch.stack(self.latents).cuda()
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
action = []
for i in range(self.args.env_batch_size):
latent_distance = (
(self.latents - obs["latent"][i].cuda()) ** 2
).mean(dim=1)
smallest_idxs = torch.topk(
latent_distance,
self.args.num_grasps * 5,
largest=False,
sorted=True,
)[1]
for idx in smallest_idxs:
possible_action = self.actions[idx]
if len(cur_actions) == 0:
action.append(possible_action)
break
seen_actions = list(
torch.stack(cur_actions)[:, i].data.cpu().numpy()
)
if possible_action not in seen_actions:
action.append(possible_action)
break
action = np.array(action)
next_obs, reward, all_done = self.env.step(action)
# record observation
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
def load(self):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/v_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/v_t_g.npy"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/t_g.npy"
)
data = np.load(location, allow_pickle=True).item()
self.actions = list(data["actions"])
self.latents = [torch.FloatTensor(d) for d in data["latents"]]
self.spot = data["spot"]
else:
try:
data = np.load(self.checkpoint, allow_pickle=True).item()
self.actions = list(data["actions"])
self.latents = [torch.FloatTensor(d) for d in data["latents"]]
self.spot = data["spot"]
            except Exception:  # no checkpoint yet, start from scratch
return
def save(self):
actions = np.array(self.actions)
latents = torch.stack(self.latents).data.cpu().numpy()
data = {"actions": actions, "latents": latents, "spot": self.spot}
np.save(self.checkpoint, data)
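    # Summary of the policy implemented above (for reference only): train() greedily
    # records (latent, best action) pairs over a random 40% subset of the training set;
    # validate() then embeds each unseen object, ranks the stored latents by mean squared
    # distance, and replays the action of the nearest stored latent that has not already
    # been selected for that object.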
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
        help="the location of the vision part prediction.",
)
parser.add_argument(
"--auto_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/auto/t_p/",
help="the location of the autoencoder part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--num_actions", type=int, default=50, help="number of action options"
)
parser.add_argument(
"--eval", action="store_true", default=False, help="for evaluating on test set"
)
    parser.add_argument(
        "--budget", type=int, default=5, help="number of grasps to perform"
    )
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
args.use_recon = False
args.use_latent = True
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/NearestNeighbor/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import random
# class for getting random samples from the space of actions
class random_sampler:
def __init__(self, args):
super().__init__()
self.args = args
def get_action(self, mask):
batch_size = mask.shape[0]
actions = []
for b in range(batch_size):
propositions = list(np.arange(self.args.num_actions))
indexes = list(np.where(mask[b] > 0)[0])
if len(indexes) > 0:
for index in sorted(indexes, reverse=True):
del propositions[index]
actions.append(random.choice(propositions))
return np.array(actions)
# class for evenly spaced samples from the space of actions
class even_sampler:
def __init__(self, args):
super().__init__()
self.args = args
self.generate_points()
# precompute the actions to be used in the trajectory
def generate_points(self):
self.angles = []
for i in range(self.args.env_batch_size):
spacing = self.args.num_actions // self.args.num_grasps
set = [spacing * i for i in range(self.args.num_grasps)]
update_num = random.choice(range(self.args.num_actions))
choice = []
for j in range(self.args.num_grasps):
choice.append((set[j] + update_num) % self.args.num_actions)
self.angles.append(choice)
# reset the precomputed actions
def reset(self):
self.generate_points()
def get_action(self, mask):
batch_size = mask.shape[0]
actions = []
for b in range(batch_size):
actions.append(self.angles[b][0])
del self.angles[b][0]
return np.array(actions)
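# Worked example (illustrative): with num_actions=50 and num_grasps=5 the spacing is
# 50 // 5 = 10, so a random offset of e.g. 7 yields the trajectory [7, 17, 27, 37, 47];
# get_action then pops one of these per step for every element of the batch.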
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/baselines/baselines.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from tqdm import tqdm
import os
from torch.utils.data import DataLoader
import torch
import argparse
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
def __call__(self):
# set up environment and policy and data
utils.set_seeds(self.args.seed)
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.even_sampler(self.args)
valid_loaders = self.get_loaders()
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
# compute accuracy
with torch.no_grad():
self.validate(valid_loaders)
# load data with pytorch dataloader
def get_loaders(self):
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=10,
collate_fn=self.env.valid_data.collate,
)
return valid_loader
# perform the even policy
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
self.policy.reset()
all_done = False
cur_observations = [obs]
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# select actions
action = self.policy.get_action(obs["mask"])
# perform actions
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
temp_scored = torch.cat(scores)
current_loss = (temp_scored[:, -1] / temp_scored[:, 0]).mean()
message = f"Valid || score: {print_score:.4f} "
message += f"reward = {print_reward:.4f} ave: {100*current_loss:.4f} %"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the vision part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval", type=bool, default=True, help="for evaluating on test set"
)
    parser.add_argument(
        "--budget", type=int, default=5, help="number of grasps to perform"
    )
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/baselines/even.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from tqdm import tqdm
import os
from torch.utils.data import DataLoader
import torch
import argparse
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
def __call__(self):
# setup the environment, policy and data
utils.set_seeds(self.args.seed)
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.even_sampler(self.args)
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
valid_loaders = self.get_loaders()
# evaluate the policy
with torch.no_grad():
self.validate(valid_loaders)
# load data using pytorch dataloader
def get_loaders(self):
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return valid_loader
    # perform the validation
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
self.policy.reset()
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# perform actions
with torch.no_grad():
action, next_obs, reward, all_done = self.env.best_step(
greedy_checks=self.args.greedy_checks
)
# record observation
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
temp_scored = torch.cat(scores)
current_loss = (temp_scored[:, -1] / temp_scored[:, 0]).mean()
message = f"Valid || score: {print_score:.4f} "
message += f"reward = {print_reward:.4f} ave: {100 * current_loss:.4f} %"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
        help="the location of the vision part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--num_actions", type=int, default=50, help="number of action options"
)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval", type=bool, default=True, help="for evaluating on test set"
)
    parser.add_argument(
        "--budget", type=int, default=5, help="number of grasps to perform"
    )
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/baselines/greedy.py |
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/baselines/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch
import argparse
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
def __call__(self):
# set up environment and policy and data
utils.set_seeds(self.args.seed)
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.random_sampler(self.args)
valid_loaders = self.get_loaders()
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
# compute accuracy
with torch.no_grad():
self.validate(valid_loaders)
# load data with pytorch dataloader
def get_loaders(self):
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return valid_loader
# perform the random policy
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# select actions
action = self.policy.get_action(obs["mask"])
# perform actions
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
# record observations
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
temp_scored = torch.cat(scores)
current_loss = (temp_scored[:, -1] / temp_scored[:, 0]).mean()
message = f"Valid || score: {print_score:.4f} "
message += f"reward = {print_reward:.4f} ave: {100 * current_loss:.4f} %"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
print("visualizing")
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the vision part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval", type=bool, default=True, help="for evaluating on test set"
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
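# Example invocation (illustrative only; every flag used here is defined in the parser above):
#   python pterotactyl/policies/baselines/rand.py --exp_type rand_baseline --budget 5 --env_batch_size 3 --visualize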
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/baselines/rand.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
import torch
import argparse
from submitit.helpers import Checkpointable
from pterotactyl.policies import environment
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
def __call__(self):
# setup the environment, and data
self.env = environment.ActiveTouch(self.args)
data_loaders, valid_loaders = self.get_loaders()
self.chosen_actions = []
self.step = 0
self.spot = 0
self.counts = np.array([0.0 for i in range(self.args.num_actions)])
# save location for the computed trajectory
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", "MFBA", self.args.exp_type
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
self.checkpoint = self.checkpoint_dir + "actions.npy"
with torch.no_grad():
self.load()
if self.args.eval:
self.validate(valid_loaders)
else:
# find the best action at every step
for i in range(self.step, self.args.num_grasps):
self.train(data_loaders)
self.save()
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
    # find the action which is most frequently selected as best over the training set for the current step
def train(self, dataloader):
print(f"Getting best action for step {len(self.chosen_actions)+1}")
training_length = len(dataloader)
random.seed(self.args.seed)
training_instances = random.sample(
range(training_length), int(training_length * 0.4)
)
for v, batch in enumerate(tqdm(dataloader)):
if v < self.spot:
continue
if v not in training_instances:
continue
self.spot = v
self.env.reset(batch)
# check the accuracy of every action
for action in self.chosen_actions:
actions = np.array([action for _ in range(self.args.env_batch_size)])
self.env.step(actions)
actions, _, _, _ = self.env.best_step(greedy_checks=self.args.greedy_checks)
# update the count for most successful action
for a in actions:
self.counts[a] += 1
if v % 20 == 0:
self.save()
self.chosen_actions.append(np.argmax(self.counts))
self.counts = np.array(
[
0 if i not in self.chosen_actions else -1e20
for i in range(self.args.num_actions)
]
)
self.spot = 0
self.step += 1
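        # Worked example (toy numbers): with num_actions = 4 and counts = [3., 7., 1., 0.],
        # np.argmax selects action 1 as the Most Frequent Best Action for this step; its count
        # is then pinned to -1e20 so it can never be re-selected, and counting restarts for the
        # remaining actions at the next step.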
# evaluate the policy
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
cur_scores = [obs["score"]]
cur_actions = []
for action in self.chosen_actions:
best_actions = np.array(
[action for _ in range(self.args.env_batch_size)]
)
# perform actions
with torch.no_grad():
next_obs, _, _ = self.env.step(best_actions)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(best_actions))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
def load(self):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_v_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_v_t_g.npy"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_t_g.npy"
)
data = np.load(location, allow_pickle=True).item()
self.counts = data["counts"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
else:
try:
data = np.load(self.checkpoint, allow_pickle=True).item()
self.counts = data["counts"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
except:
return
def save(self):
data = {
"counts": self.counts,
"chosen_actions": self.chosen_actions,
"step": self.step,
"spot": self.spot,
}
np.save(self.checkpoint, data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/dataset_specific/MFBA.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
import torch
import argparse
from submitit.helpers import Checkpointable
from pterotactyl.policies import environment
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
def __call__(self) -> float:
# setup the environment, and data
self.env = environment.ActiveTouch(self.args)
data_loaders, valid_loaders = self.get_loaders()
self.chosen_actions = []
self.step = 0
self.spot = 0
self.action_scores = np.array(
[
1e10 if i not in self.chosen_actions else 1e20
for i in range(self.args.num_actions)
]
)
self.checks = np.array([1.0 for i in range(self.args.num_actions)])
# save location for the computed trajectory
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", "LEBA", self.args.exp_type
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
self.checkpoint = self.checkpoint_dir + "actions.npy"
with torch.no_grad():
self.load()
if self.args.eval:
self.validate(valid_loaders)
else:
# find the best action at every step
for i in range(self.step, self.args.num_grasps):
self.train(data_loaders)
self.save()
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
# compute the lowest error action for the current step
def train(self, dataloader):
print(f"Getting best action for step {len(self.chosen_actions)+1}")
# for all training data
training_length = len(dataloader)
random.seed(self.args.seed)
training_instances = random.sample(
range(training_length), int(training_length * 0.4)
)
for v, batch in enumerate(tqdm(dataloader)):
if v < self.spot:
continue
if v not in training_instances:
continue
self.spot = v
self.env.reset(batch)
# check the accuracy of every action
for action in self.chosen_actions:
actions = np.array([action for _ in range(self.args.env_batch_size)])
self.env.step(actions)
remaining_actions = [
i for i in range(self.args.num_actions) if i not in self.chosen_actions
]
remaining_actions = [
remaining_actions for i in range(self.args.env_batch_size)
]
if self.args.greedy_checks < self.args.num_actions:
for i in range(self.args.env_batch_size):
remaining_actions[i] = random.sample(
remaining_actions[i], self.args.greedy_checks
)
for i in range(len(remaining_actions[0])):
actions = np.array(
[remaining_actions[j][i] for j in range(self.args.env_batch_size)]
)
                # evaluate the candidate actions once and normalise by the initial score
                result = self.env.check_step(actions)
                scores = result["score"] / result["first_score"]
for action, score in zip(actions, scores):
if self.action_scores[action] == 1e10:
self.action_scores[action] = score
else:
self.action_scores[action] += score
self.checks[action] += 1.0
if v % 20 == 0:
self.save()
# record the lowest error action
action_scores = self.action_scores / self.checks
self.chosen_actions.append(np.argmin(action_scores))
self.action_scores = np.array(
[
1e10 if i not in self.chosen_actions else 1e20
for i in range(self.args.num_actions)
]
)
self.checks = np.array([1.0 for i in range(self.args.num_actions)])
self.spot = 0
self.step += 1
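        # In contrast to MFBA, this policy averages the normalised reconstruction error of every
        # candidate action over the sampled training instances (action_scores / checks) and
        # greedily appends the argmin, i.e. the Lowest Error Best Action for this step.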
# validate the chosen trajectory
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
cur_scores = [obs["score"]]
cur_actions = []
for action in self.chosen_actions:
best_actions = np.array(
[action for _ in range(self.args.env_batch_size)]
)
# perform actions
with torch.no_grad():
next_obs, _, _ = self.env.step(best_actions)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(best_actions))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
def load(self):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_v_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_v_t_g.npy"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_t_g.npy"
)
data = np.load(location, allow_pickle=True).item()
self.action_scores = data["action_scores"]
self.checks = data["checks"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
else:
try:
data = np.load(self.checkpoint, allow_pickle=True).item()
self.action_scores = data["action_scores"]
self.checks = data["checks"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
except:
return
def save(self):
data = {
"action_scores": self.action_scores,
"checks": self.checks,
"chosen_actions": self.chosen_actions,
"step": self.step,
"spot": self.spot,
}
np.save(self.checkpoint, data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/policies/dataset_specific/LEBA.py |
Active-3D-Vision-and-Touch-main | pterotactyl/simulator/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
from scipy.spatial.transform import Rotation as R
import pyrender
import trimesh
import pterotactyl.objects as objects
from pterotactyl.utility import utils
from random import randrange
HAND_COLOUR = [119, 136, 153, 255]
DIGIT_COLOUR = [119, 225, 153, 175]
class Renderer:
def __init__(self, hand, pb, cameraResolution=[256, 256]):
self.scene = self.init_scene()
self.hand = hand
self.pb = pb
self.hand_nodes = []
self.object_nodes = []
self.init_camera()
self.init_hand()
self.update_hand()
self.r = pyrender.OffscreenRenderer(cameraResolution[0], cameraResolution[1])
    # the scene is initialized with fixed lights; these can easily be changed to match the desired environment
def init_scene(self):
scene = pyrender.Scene(ambient_light=[0.3, 0.3, 0.3])
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, -0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, 0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[-1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
return scene
def init_camera(self):
# initializes the camera parameters
camera = pyrender.PerspectiveCamera(
yfov=60.0 / 180.0 * np.pi, znear=0.01, zfar=10.0, aspectRatio=1.0
)
camera_pose = utils.euler2matrix(
xyz="xyz", angles=[0, 0, 0], translation=[0, 0, 0], degrees=True
)
camera_node = pyrender.Node(camera=camera, matrix=camera_pose)
self.scene.add_node(camera_node)
self.scene.main_camera_node = camera_node
self.camera = camera_node
# this viewpoint is used in the paper
        # if you change this, you will need to update the camera parameter matrix in the reconstruction model as well
initial_matrix = R.from_euler("xyz", [45.0, 0, 270.0], degrees=True).as_matrix()
self.update_camera_pose([-0.3, 0, 0.3], initial_matrix)
def add_object(
self,
mesh,
position=[0, 0, 0],
orientation=[0, 0, 0],
colour=[228, 217, 111, 255],
):
mesh.visual.vertex_colors = colour
mesh = pyrender.Mesh.from_trimesh(mesh)
pose = utils.euler2matrix(angles=orientation, translation=position)
obj_node = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(obj_node)
self.object_nodes.append(obj_node)
# defines the hand in the scene
def init_hand(self):
hand_location = os.path.join(
os.path.dirname(objects.__file__), "hand/meshes_obj/"
)
base_obj = trimesh.load(hand_location + "0_base.obj")
base_obj = trimesh.Trimesh(vertices=base_obj.vertices, faces=base_obj.faces)
base_obj.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(base_obj)
for _ in range(3):
for i in range(1, 5):
element = trimesh.load(hand_location + f"{i}_finger.obj")
element = trimesh.Trimesh(
vertices=element.vertices, faces=element.faces
)
element.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(element)
element = trimesh.load(hand_location + "5_digit.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = DIGIT_COLOUR
self.add_hand_obj(element)
for i in range(6, 10):
element = trimesh.load(hand_location + f"{i}_thumb.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(element)
element = trimesh.load(hand_location + "5_digit.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = DIGIT_COLOUR
self.add_hand_obj(element)
def add_hand_obj(self, obj_location):
mesh = pyrender.Mesh.from_trimesh(obj_location)
pose = utils.euler2matrix(angles=[0, 0, 0], translation=[0, 0, 0])
obj_node = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(obj_node)
self.hand_nodes.append(obj_node)
# gets the various hand element's position and orientation and uses them to update the hand in the scene
def update_hand(self):
# base of the hand
position, orientation = self.pb.getBasePositionAndOrientation(self.hand)
orientation = self.pb.getEulerFromQuaternion(orientation)
pose = utils.euler2matrix(angles=orientation, translation=position)
self.scene.set_pose(self.hand_nodes[0], pose=pose)
indices = [
0,
1,
2,
3,
4,
7,
8,
9,
10,
11,
14,
15,
16,
17,
18,
21,
22,
23,
24,
25,
]
# all other elements
for node, index in zip(self.hand_nodes[1:], indices):
position, orientation = self.pb.getLinkState(self.hand, index)[:2]
orientation = self.pb.getEulerFromQuaternion(orientation)
pose = utils.euler2matrix(angles=orientation, translation=position)
self.scene.set_pose(node, pose=pose)
    # moves the hand out of the view of the camera
def remove_hand(self):
for node in self.hand_nodes:
pose = utils.euler2matrix(angles=[0, 0, 0], translation=[0, 0, -10.0])
self.scene.set_pose(node, pose=pose)
def remove_objects(self):
for obj in self.object_nodes:
self.scene.remove_node(obj)
self.object_nodes = []
def update_camera_pose(self, position, orientation):
pose = np.eye(4)
if np.array(orientation).shape == (3,):
orientation = R.from_euler("xyz", orientation, degrees=True).as_matrix()
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose
def render(self, get_depth=False):
colour, depth = self.r.render(self.scene)
if get_depth:
return colour, depth
return colour
| Active-3D-Vision-and-Touch-main | pterotactyl/simulator/rendering/vision_renderer.py |
Active-3D-Vision-and-Touch-main | pterotactyl/simulator/rendering/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
import cv2
import pyrender
import trimesh
from scipy.spatial.transform import Rotation as R
from pterotactyl.utility import utils
class Renderer:
def __init__(self, cameraResolution=[120, 160]):
"""
:param width: scalar
:param height: scalar
"""
self.width = cameraResolution[0]
self.height = cameraResolution[1]
self._background_real = None
self.force_enabled = False
self._init_pyrender()
def _init_pyrender(self):
"""
Initialize pyrender
"""
# Create scene for pybullet sync
self.scene = pyrender.Scene()
self.object_nodes = []
self.current_light_nodes = []
self.cam_light_ids = None
self._init_gel()
self._init_camera()
self._init_light()
self.r = pyrender.OffscreenRenderer(self.width, self.height)
colors, depths = self.render(noise=False, calibration=False)
self._background_sim = colors
def _init_gel(self):
"""
Add gel surface in the scene
"""
        # Create curved gel surface (parameters are hard-coded below)
gel_trimesh = self._generate_gel_trimesh()
mesh_gel = pyrender.Mesh.from_trimesh(gel_trimesh, smooth=False)
self.gel_pose0 = np.eye(4)
self.gel_node = pyrender.Node(mesh=mesh_gel, matrix=self.gel_pose0)
self.scene.add_node(self.gel_node)
def _generate_gel_trimesh(self):
# Load config
origin = [0.022, 0, 0.015]
X0, Y0, Z0 = origin[0], origin[1], origin[2]
W, H = 0.02, 0.03
# Curved gel surface
N = 100
M = int(N * H / W)
R = 0.1
zrange = 0.005
y = np.linspace(Y0 - W / 2, Y0 + W / 2, N)
z = np.linspace(Z0 - H / 2, Z0 + H / 2, M)
yy, zz = np.meshgrid(y, z)
h = R - np.maximum(0, R ** 2 - (yy - Y0) ** 2 - (zz - Z0) ** 2) ** 0.5
xx = X0 - zrange * h / h.max()
gel_trimesh = self._generate_trimesh_from_depth(xx)
return gel_trimesh
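        # The surface above is a spherical cap of radius R sampled on an N x M grid:
        # h = R - sqrt(R^2 - (y - Y0)^2 - (z - Z0)^2), rescaled so that the maximum bulge
        # equals zrange and offset from the sensor origin X0.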
def _generate_trimesh_from_depth(self, depth):
# Load config
origin = [0.022, 0, 0.015]
_, Y0, Z0 = origin[0], origin[1], origin[2]
W, H = 0.02, 0.03
N = depth.shape[1]
M = depth.shape[0]
# Create grid mesh
vertices = []
faces = []
y = np.linspace(Y0 - W / 2, Y0 + W / 2, N)
z = np.linspace(Z0 - H / 2, Z0 + H / 2, M)
yy, zz = np.meshgrid(y, z)
# Vertex format: [x, y, z]
vertices = np.zeros([N * M, 3])
# Add x, y, z position to vertex
vertices[:, 0] = depth.reshape([-1])
vertices[:, 1] = yy.reshape([-1])
vertices[:, 2] = zz.reshape([-1])
# Create faces
faces = np.zeros([(N - 1) * (M - 1) * 6], dtype=np.uint)
        # calculate id for each vertex: (row i, col j) => i * N + j
xid = np.arange(N)
yid = np.arange(M)
yyid, xxid = np.meshgrid(xid, yid)
ids = yyid[:-1, :-1].reshape([-1]) + xxid[:-1, :-1].reshape([-1]) * N
# create upper triangle
faces[::6] = ids # (i, j)
faces[1::6] = ids + N # (i+1, j)
faces[2::6] = ids + 1 # (i, j+1)
# create lower triangle
faces[3::6] = ids + 1 # (i, j+1)
faces[4::6] = ids + N # (i+1, j)
faces[5::6] = ids + N + 1 # (i+1, j+1)
faces = faces.reshape([-1, 3])
# camera_pose = utils.euler2matrix(
# angles=np.deg2rad([90, 0, -90]), translation=[0, 0, 0.015],
# )
vertices = vertices - np.array([0, 0, 0.015]).reshape(1, 3)
orientation = R.from_euler("xyz", [90, 0, -90], degrees=True).as_matrix()
vertices = vertices.dot(orientation)
# position = [0, 0, 0.015]
gel_trimesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
return gel_trimesh
def _init_camera(self):
"""
Set up camera
"""
camera = pyrender.PerspectiveCamera(yfov=np.deg2rad(60), znear=0.001)
camera_pose = utils.euler2matrix(
angles=np.deg2rad([0, 0, 0]), translation=[0, 0, -0.0035]
)
self.camera_pose = camera_pose
# Add camera node into scene
camera_node = pyrender.Node(camera=camera, matrix=camera_pose)
self.scene.add_node(camera_node)
self.camera = camera_node
self.cam_light_ids = list([0, 1, 2])
def _init_light(self):
"""
Set up light
"""
# Load light from config file
origin = np.array([0.005, 0, 0.015])
xyz = []
# Apply polar coordinates
thetas = [30, 150, 270]
rs = [0.02, 0.02, 0.02]
xs = [0, 0, 0]
for i in range(len(thetas)):
theta = np.pi / 180 * thetas[i]
xyz.append([xs[i], rs[i] * np.cos(theta), rs[i] * np.sin(theta)])
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
intensities = [1, 1, 1]
# Save light nodes
self.light_nodes = []
self.light_poses0 = []
for i in range(len(colors)):
color = colors[i]
position = xyz[i] + origin - np.array([0, 0, 0.015])
orientation = R.from_euler("xyz", [90, 0, -90], degrees=True).as_matrix()
position = position.dot(orientation)
orientation = np.deg2rad([90, 0, -90])
light_pose_0 = utils.euler2matrix(angles=orientation, translation=position)
light = pyrender.PointLight(color=color, intensity=intensities[i])
light_node = pyrender.Node(light=light, matrix=light_pose_0)
self.scene.add_node(light_node)
self.light_nodes.append(light_node)
self.light_poses0.append(light_pose_0)
self.current_light_nodes.append(light_node)
def add_object(self, objTrimesh, position=[0, 0, 0], orientation=[0, 0, 0]):
mesh = trimesh.Trimesh(
vertices=objTrimesh.vertices, faces=objTrimesh.faces, process=False
)
mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
pose = utils.euler2matrix(angles=orientation, translation=position)
objNode = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(objNode)
self.object_nodes.append(objNode)
def update_camera_pose(self, position, orientation):
pose = np.eye(4)
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose.dot(self.camera_pose)
# Update gel
gel_pose = pose.dot(self.gel_pose0)
self.gel_node.matrix = gel_pose
# Update light
for i in range(len(self.light_nodes)):
light_pose = pose.dot(self.light_poses0[i])
light_node = self.light_nodes[i]
light_node.matrix = light_pose
def update_objects_pose(self, position, orientation):
pose = utils.euler2matrix(angles=orientation, translation=position)
for obj in self.object_nodes:
self.scene.set_pose(obj, pose=pose)
def remove_objects(self):
for obj in self.object_nodes:
self.scene.remove_node(obj)
self.object_nodes = []
def update_light(self, lightIDList):
"""
Update the light node based on lightIDList, remove the previous light
"""
# Remove previous light nodes
for node in self.current_light_nodes:
self.scene.remove_node(node)
# Add light nodes
self.current_light_nodes = []
for i in lightIDList:
light_node = self.light_nodes[i]
self.scene.add_node(light_node)
self.current_light_nodes.append(light_node)
def _add_noise(self, color):
"""
Add Gaussian noise to the RGB image
:param color:
:return:
"""
# Add noise to the RGB image
mean = 0
std = 7
noise = np.random.normal(mean, std, color.shape) # Gaussian noise
color = np.clip(color + noise, 0, 255).astype(np.uint8) # Add noise and clip
return color
def _calibrate(self, color):
if self._background_real is not None:
# Simulated difference image, with scaling factor 0.5
            diff = (color.astype(np.float64) - self._background_sim) * 0.5
# Add low-pass filter to match real readings
diff = cv2.GaussianBlur(diff, (7, 7), 0)
# Combine the simulated difference image with real background image
color = np.clip((diff[:, :, :3] + self._background_real), 0, 255).astype(
np.uint8
)
return color
def _post_process(self, color, depth, noise=True, calibration=True):
if calibration:
color = self._calibrate(color)
if noise:
color = self._add_noise(color)
return color, depth
def render(self, noise=True, calibration=True):
self.scene.main_camera_node = self.camera
self.update_light(self.cam_light_ids)
color, depth = self.r.render(self.scene)
color, depth = self._post_process(color, depth, noise, calibration)
return color, depth
| Active-3D-Vision-and-Touch-main | pterotactyl/simulator/rendering/tacto_renderer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
import pyrender
from pterotactyl.utility import utils
class Renderer:
def __init__(self, cameraResolution=[120, 160]):
self.scene = pyrender.Scene(ambient_light=[0.1, 0.1, 0.1])
self.object_nodes = []
self.initialize_camera()
self.r = pyrender.OffscreenRenderer(cameraResolution[0], cameraResolution[1])
def initialize_camera(self):
camera = pyrender.PerspectiveCamera(
yfov=40.0 / 180.0 * np.pi, znear=0.0001, zfar=10.0
)
self.camera_pose = utils.euler2matrix(
xyz="xyz", angles=[0, 0, 0], translation=[0, 0, 0], degrees=True
)
# Add camera node into scene
camera_node = pyrender.Node(camera=camera, matrix=self.camera_pose)
self.scene.add_node(camera_node)
self.camera = camera_node
def add_object(self, objTrimesh, position=[0, 0, 0], orientation=[0, 0, 0]):
mesh = pyrender.Mesh.from_trimesh(objTrimesh)
pose = utils.euler2matrix(angles=orientation, translation=position)
objNode = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(objNode)
self.object_nodes.append(objNode)
def update_objects_pose(self, position, orientation):
pose = utils.euler2matrix(angles=orientation, translation=position)
for obj in self.object_nodes:
self.scene.set_pose(obj, pose=pose)
def remove_objects(self):
for obj in self.object_nodes:
self.scene.remove_node(obj)
self.object_nodes = []
def update_camera_pose(self, position, orientation):
pose = np.eye(4)
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose.dot(self.camera_pose)
def render(self):
self.scene.main_camera_node = self.camera
_, depth = self.r.render(self.scene)
return depth
| Active-3D-Vision-and-Touch-main | pterotactyl/simulator/rendering/touch_renderer.py |
Active-3D-Vision-and-Touch-main | pterotactyl/simulator/physics/__init__.py |
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
from scipy.spatial.transform import Rotation as R
from scipy.spatial import ConvexHull
from pterotactyl.utility import utils
class Agnostic_Grasp:
def __init__(self, pb, hand):
self.pb = pb
self.hand = hand
self.directions = -utils.get_circle(50).points.data.numpy()
self.convex_mesh = None
self.verts = None
def set_object(self, verts, faces):
hull = ConvexHull(verts.data.numpy())
self.convex_mesh = trimesh.Trimesh(
vertices=verts, faces=hull.simplices, process=False
)
self.verts = verts.data.numpy()
def remove_object(self):
self.convex_mesh = None
self.verts = None
# converts selected action into the corresponding hand rotation
def action_to_params(
self, action
): # converts action selection into hand parameters
direction = self.directions[action]
rotation = 0
return direction, rotation
def grasp(self, action):
self.reset_hand()
direction, rotation = self.action_to_params(
action
) # convert action into grasping parameters
success = self.set_hand_hull(
direction, rotation
        ) # identify the point on the convex hull which intersects the chosen hand direction
# if no intersection is found
if not success:
return False
else:
            # set all joint angles to maximum to perform the grasp
joint_angles = [10 for _ in range(28)]
self.pb.setJointMotorControlArray(
self.hand,
range(28),
self.pb.POSITION_CONTROL,
targetPositions=joint_angles,
)
for i in range(5):
self.pb.stepSimulation()
return True
def set_hand_hull(self, direction, rotation, hand_distance=0.013):
# define ray from the center of the object to outwards in the chosen direction
ray_origins = np.array([[0, 0, 0]])
ray_directions = np.array([direction])
# find intersection with ray and convex hull
locations, index_ray, index_tri = self.convex_mesh.ray.intersects_location(
ray_origins=ray_origins, ray_directions=ray_directions
)
# if no intersection were found
if len(locations) == 0:
return False
else:
            # find the furthest intersection from the center of the object
test_locations = np.array(locations)
test_locations = (test_locations ** 2).sum(axis=-1)
max_location = np.argmax(test_locations)
point = locations[max_location]
face = self.convex_mesh.faces[index_tri[0]]
# place the hand above the convex hull at the intersection point
hand_position, surface_normal = self.get_position_on_hull(
self.verts, face, point, hand_distance
)
hand_orientation = self.pb.getQuaternionFromEuler([rotation, 0, 0])
surface_normal -= 0.001
handUpdateOrientation = utils.quats_from_vectors([-1, 0, 0], surface_normal)
hand_orientation = utils.combine_quats(
handUpdateOrientation, hand_orientation
)
# place the middle finger tip on the point instead of the hand center
# displacement of the fingertip from the center of the hand
v = [0, 0, 0.133]
matrix = (R.from_quat(hand_orientation)).as_matrix()
hand_position -= matrix.dot(v)
            # transform the hand
self.pb.resetBasePositionAndOrientation(
self.hand, hand_position, hand_orientation
)
return True
    # find the normal of the face which the ray intersects, and a point just above the surface in this direction
def get_position_on_hull(self, verts, face, point, distance):
p1, p2, p3 = verts[face[0]], verts[face[1]], verts[face[2]]
normal = utils.normal_from_triangle(p1, p2, p3)
p1 = np.array([0, 0, 0])
p2 = point
p3 = point + normal * 0.0001
# check the normal is pointing away from the mesh
if ((p1 - p2) ** 2).sum() > ((p1 - p3) ** 2).sum():
normal = normal * -1
# move position of the finger to slightly above the mesh
point = point + normal * distance
return point, normal
def reset_hand(self):
# moves hand away from the object to avoid intersections
self.pb.resetBasePositionAndOrientation(self.hand, [20, 0, 0], [1, 0, 0, 0])
# sets all joints to the initial angles
joint_angles = [0 for _ in range(28)]
        # sets the thumb to oppose the fingers
joint_angles[20] = 1.2
joint_angles[22] = 0.7
for i in range(28):
self.pb.resetJointState(self.hand, i, joint_angles[i])
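# Minimal usage sketch (assumes an existing pybullet client `pb` with the Allegro hand already
# loaded as `hand`, mirroring how the Scene class drives this grasper):
#   grasper = Agnostic_Grasp(pb, hand)
#   grasper.set_object(verts, faces)   # verts/faces: torch tensors describing the object mesh
#   success = grasper.grasp(action)    # action indexes one of the 50 approach directions
#   grasper.remove_object()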
| Active-3D-Vision-and-Touch-main | pterotactyl/simulator/physics/grasping.py |
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import pybullet as pb
import numpy as np
import trimesh
import torch
from scipy.spatial.transform import Rotation as R
from scipy import ndimage
from pterotactyl.simulator.rendering import touch_renderer
from pterotactyl.simulator.rendering import tacto_renderer
from pterotactyl.simulator.rendering import vision_renderer
from pterotactyl.utility import utils
import pterotactyl.objects as objects
class Scene:
def __init__(
self,
grasp_class,
max_depth=0.025,
conn=pb,
vision=True,
resolution=[256, 256],
object_colour=[228, 217, 111, 255],
TACTO=False,
):
hand_location = os.path.join(
os.path.dirname(objects.__file__), "hand/allegro_hand.urdf"
)
self.hand = conn.loadURDF(
hand_location,
[0, 0, 0],
conn.getQuaternionFromEuler([0, 0, 0]),
useFixedBase=1,
)
# the indices of the hand definition which correspond to the finger's perspective
self.touch_cameras = [6, 13, 20, 27]
        # furthest distance from the fingers which is observable by the touch sensors
self.max_depth = max_depth
if TACTO:
self.max_depth = min(self.max_depth, 0.015)
self.pb = conn
self.obj = None
self.grasper = grasp_class(self.pb, self.hand)
self.depths = None
self.TACTO = TACTO
# if vision signals are desired
self.vision = vision
if self.vision:
self.object_colour = object_colour
self.camera_renderer = vision_renderer.Renderer(
self.hand, pb, cameraResolution=resolution
)
if self.TACTO:
self.touch_renderer = tacto_renderer.Renderer(cameraResolution=[121, 121])
else:
self.touch_renderer = touch_renderer.Renderer(cameraResolution=[121, 121])
def grasp(self, action):
return self.grasper.grasp(action)
def get_hand_pose(self):
poses = []
for i in range(28):
poses.append(self.get_pose(self.hand, i))
return poses
def get_pose(self, objID, linkID):
if linkID <= 0:
position, orientation = self.pb.getBasePositionAndOrientation(objID)
else:
position, orientation = self.pb.getLinkState(
objID, linkID, computeLinkVelocity=False, computeForwardKinematics=True
)[:2]
orientation = self.pb.getEulerFromQuaternion(orientation)
return position, orientation
def load_obj(self, verts, faces, urdf_location):
# adding repeating faces to ensure they are observed
faces = utils.add_faces(faces)
# loading into pybullet
self.obj = self.pb.loadURDF(
urdf_location, [0, 0, 0], [0, 0, 0, 1], useFixedBase=1
)
# loading into pyrender
mesh = trimesh.Trimesh(vertices=verts, faces=faces, process=False)
self.touch_renderer.add_object(mesh, position=[0, 0, 0], orientation=[0, 0, 0])
if self.vision:
self.camera_renderer.add_object(
mesh,
position=[0, 0, 0],
orientation=[0, 0, 0],
colour=self.object_colour,
)
# loading into grasp function
self.obj_verts = torch.FloatTensor(verts)
self.obj_faces = torch.LongTensor(faces)
self.grasper.set_object(self.obj_verts, self.obj_faces)
def remove_obj(self):
if self.obj is not None:
self.pb.removeBody(self.obj)
self.touch_renderer.remove_objects()
self.obj = None
self.hull_faces = None
if self.vision:
self.camera_renderer.remove_objects()
self.grasper.remove_object()
# render depth from the perspective of each finger
def render_depth(self):
statuses = []
depths = []
colours = []
for i in range(4):
# update position of the scene camera
position, orientation = self.get_pose(self.hand, self.touch_cameras[i])
rot_off_finger = R.from_euler("xyz", [0, -90, 0], degrees=True).as_matrix()
rot_finger = R.from_euler("xyz", orientation, degrees=False).as_matrix()
orientation_update = np.matmul(rot_finger, rot_off_finger)
self.touch_renderer.update_camera_pose(
position=position, orientation=orientation_update
)
# render depth
if self.TACTO:
colour, depth = self.touch_renderer.render()
colours.append(colour)
else:
depth = self.touch_renderer.render()
# check if object is close enough to register on touch sensor
if (depth <= self.max_depth).sum() - (depth == 0).sum() > 0:
statuses.append("touch")
else:
statuses.append("no_touch")
depths.append(depth)
self.depths = depths
self.statuses = statuses
if self.TACTO:
self.colours = colours
return statuses
# converts depth map into point cloud in the reference frame of the object
def depth_to_points(self):
if self.TACTO:
fov = 60.0 / 180.0 * np.pi # intrinsic camera parameter
else:
fov = 40.0 / 180.0 * np.pi # intrinsic camera parameter
points = []
depths = np.array(self.depths)
out_of_range = depths > self.max_depth
# sets depth beyond touch sensor to 1
depths[out_of_range] = 1.0
# sets infinite depth to 1 instead of 0
depths[depths == 0] = 1
for i in range(4):
if self.statuses[i] == "touch":
depth = depths[i]
# creates grid of points
ys = np.arange(0, 121)
ys = np.tile(ys, (121, 1)) - 60
ys = ys.transpose()
xs = ys.transpose()
# updates grid with depth
point_cloud = np.zeros((121, 121, 3))
angle = np.arctan((np.abs(xs) / 60.0) * np.tan(fov / 2.0))
point_cloud[:, :, 0] = depth * np.tan(angle) * np.sign(xs)
angle = np.arctan((np.abs(ys) / 60.0) * np.tan(fov / 2.0))
point_cloud[:, :, 1] = depth * np.tan(angle) * -np.sign(ys)
point_cloud[:, :, 2] = -depth
# removes depth beyond sensor range
point_cloud = point_cloud[depth < 1.0]
point_cloud = point_cloud.reshape((-1, 3))
# transforms points to reference frame of the finger
position, orientation = self.get_pose(self.hand, self.touch_cameras[i])
rot_z = np.array([0, -90.0, 0])
r1 = R.from_euler("xyz", rot_z, degrees=True).as_matrix()
r2 = R.from_euler("xyz", orientation, degrees=False).as_matrix()
orientation = np.matmul(r2, r1)
if self.TACTO:
point_cloud[:, -1] = point_cloud[:, -1] - 0.0035
point_cloud = orientation.dot(point_cloud.T).T + position
points.append(point_cloud)
else:
points.append(np.array([]))
return points
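    # Note on the back-projection above: a pixel offset u from the image centre maps to a viewing
    # angle theta = arctan(|u| / 60 * tan(fov / 2)), so its lateral coordinate is depth * tan(theta);
    # the camera looks down -z, hence z = -depth before the transform into the object frame.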
# simulates touch signal from depth
def depth_to_touch(self, depth):
        # set depths which weren't observed to 1 instead of zero
out_of_range = depth > self.max_depth
depth[out_of_range] = 1.0
depth[depth == 0] = 1
dim = depth.shape[-1]
zeros = depth >= self.max_depth
depth = -(depth - self.max_depth)
depth[zeros] = 0
gel_depths = depth * 6 / self.max_depth
# smooth depth values
depth = gel_depths / (30.0) + 0.4
filter_size = 7
k = np.ones((filter_size, filter_size)) / (filter_size ** 2)
depth_smoothed = ndimage.convolve(depth, k, mode="reflect")
# fix "infinite" depths to zeros
depth[zeros] = depth_smoothed[zeros]
# add rgb and ambient lights
light_positions = np.array(
[[-0.5, 0.5, 1.0], [1.3, -0.4, 1.0], [1.3, 1.4, 1.0]]
)
        # set to zero, qualitatively better
ambient_intensity = np.array([0.0, 0.0, 0.0])
diffuse_constant = 2.0
touch = np.zeros((dim, dim, 3))
touch[:, :] += ambient_intensity
# calculate normal of surface
zy, zx = np.gradient(depth)
normal = np.dstack((-zx, -zy, np.ones_like(depth)))
normal = utils.normalize_vector(normal)
# calc depth positions
depth_positions = np.arange(dim).repeat(dim).reshape(dim, dim) / float(dim)
depth_positions = np.stack(
(depth_positions, depth_positions.transpose(), depth)
).transpose((1, 2, 0))
        # compute intensity from the surface normal and light direction using the Phong model, assuming no specularity
for i in range(3):
light_direction = light_positions[i] - depth_positions
light_direction = utils.normalize_vector(light_direction)
touch[:, :, i] += np.clip(
diffuse_constant * np.multiply(normal, light_direction).sum(-1), 0, 1
)
touch = np.clip(touch * 255.0, 0, 255) # clip within reasonable range
return touch
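    # The shading above is a diffuse-only Phong model: for each of the three coloured lights the
    # per-pixel intensity is clip(k_d * (n . l), 0, 1) with k_d = diffuse_constant, n the surface
    # normal estimated from the depth gradient, and l the normalised direction to the light;
    # the ambient term is kept at zero.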
def render_touch(self):
touches = []
depths = np.array(self.depths)
if self.TACTO:
return self.colours
else:
for depth in depths:
touches.append(self.depth_to_touch(depth))
return touches
def get_finger_frame(self):
positions = []
rots = []
for i in range(4):
position, orientation = self.get_pose(self.hand, self.touch_cameras[i])
rot = R.from_euler("xyz", orientation, degrees=False).as_matrix()
positions.append(position)
rots.append(rot)
frame = {"pos": torch.FloatTensor(positions), "rot_M": torch.FloatTensor(rots)}
return frame
def scene_render(self, occluded=True, parameters=None):
if occluded:
self.camera_renderer.update_hand()
else:
self.camera_renderer.remove_hand()
if parameters is not None:
self.camera_renderer.update_camera_pose(parameters[0], parameters[1])
image = self.camera_renderer.render()
return image
| Active-3D-Vision-and-Touch-main | pterotactyl/simulator/scene/instance.py |
Active-3D-Vision-and-Touch-main | pterotactyl/simulator/scene/__init__.py |
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pybullet_utils.bullet_client as bc
import pybullet as pb
import pybullet_data
import torch
from pterotactyl.simulator.scene import instance
from pterotactyl.utility import utils
class Sampler:
def __init__(
self,
grasp_class,
bs=1,
vision=True,
max_depth=0.025,
object_colours=[228, 217, 111, 255],
resolution=[256, 256],
TACTO=False,
):
self.pybullet_connections = []
self.pybullet_scenes = []
self.bs = bs
self.vision = vision
# make a connection for every element in the batch
for i in range(bs):
self.pybullet_connections.append(bc.BulletClient(connection_mode=pb.DIRECT))
self.pybullet_connections[i].setAdditionalSearchPath(
pybullet_data.getDataPath()
)
if np.array(object_colours).shape == (4,):
colour = object_colours
else:
colour = object_colours[i]
self.pybullet_scenes.append(
instance.Scene(
grasp_class,
max_depth=max_depth,
conn=self.pybullet_connections[i],
vision=self.vision,
object_colour=colour,
resolution=resolution,
TACTO=TACTO,
)
)
    # disconnects the pybullet threads
def disconnect(self):
for i in range(self.bs):
self.pybullet_connections[i].disconnect()
# loads the objects into each pybullet thread
def load_objects(self, batch, from_dataset=True, scale=3.1):
self.remove_objects()
assert len(batch) == self.bs
for i in range(self.bs):
obj_location = batch[i]
# if the object information has already been extracted
if from_dataset:
verts = np.load(obj_location + "_verts.npy")
faces = np.load(obj_location + "_faces.npy")
faces = utils.add_faces(faces)
urdf_location = obj_location + ".urdf"
# extract and record the object information
else:
obj_location = obj_location + ".obj"
urdf_location = obj_location + ".urdf"
verts, faces = utils.get_obj_data(obj_location, scale=scale)
utils.make_urdf(verts, faces, urdf_location)
self.pybullet_scenes[i].load_obj(verts, faces, urdf_location)
def remove_objects(self):
for i in range(self.bs):
self.pybullet_scenes[i].remove_obj()
def grasp(self, i, actions):
return self.pybullet_scenes[i].grasp(actions[i])
    # perform the grasp and extract the requested information
def sample(
self,
actions,
touch=True,
touch_point_cloud=False,
vision=False,
vision_occluded=False,
parameters=None,
):
success = []
poses = []
dict = {}
# check if the grasps are feasible
for i in range(self.bs):
            # perform the grasps
success.append(self.grasp(i, actions))
if success[-1]:
poses.append(self.pybullet_scenes[i].get_hand_pose())
else:
poses.append(None)
dict["hand_pose"] = poses
# get touch signal from grasp
if touch:
touch_status = [
["no_intersection" for _ in range(4)] for _ in range(self.bs)
]
touch_signal = torch.zeros((self.bs, 4, 121, 121, 3))
depths = torch.zeros((self.bs, 4, 121, 121))
finger_transform_pos = torch.zeros((self.bs, 4, 3))
finger_transform_rot_M = torch.zeros((self.bs, 4, 3, 3))
for i in range(self.bs):
if success[i]:
# depth from camera
touch_status[i] = self.pybullet_scenes[i].render_depth()
# simulated touch from depth
touch = self.pybullet_scenes[i].render_touch()
ref_frame = self.pybullet_scenes[i].get_finger_frame()
touch_signal[i] = torch.FloatTensor(touch)
depths[i] = torch.FloatTensor(self.pybullet_scenes[i].depths)
finger_transform_pos[i] = torch.FloatTensor(ref_frame["pos"])
finger_transform_rot_M[i] = torch.FloatTensor(ref_frame["rot_M"])
dict["touch_status"] = touch_status
dict["touch_signal"] = touch_signal
dict["depths"] = depths
dict["finger_transfrom_pos"] = finger_transform_pos
dict["finger_transform_rot_M"] = finger_transform_rot_M
# get pointcloud of touch site in the object frame of reference
if touch_point_cloud:
point_clouds = []
for i in range(self.bs):
point_clouds.append(self.pybullet_scenes[i].depth_to_points())
dict["touch_point_cloud"] = point_clouds
# get image of the grasp
if vision_occluded:
vision_occluded_imgs = []
for i in range(self.bs):
if parameters is not None:
param = parameters[i]
else:
param = None
img = self.pybullet_scenes[i].scene_render(
occluded=True, parameters=param
)
vision_occluded_imgs.append(img)
dict["vision_occluded"] = vision_occluded_imgs
# get image of the object
if vision:
vision_imgs = []
for i in range(self.bs):
if parameters is not None:
param = parameters[i]
else:
param = None
img = self.pybullet_scenes[i].scene_render(
occluded=False, parameters=param
)
vision_imgs.append(img)
dict["vision"] = vision_imgs
return dict
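# Minimal usage sketch (hypothetical object path shown purely for illustration):
#   from pterotactyl.simulator.physics import grasping
#   sampler = Sampler(grasping.Agnostic_Grasp, bs=1, vision=True)
#   sampler.load_objects(["path/to/object"], from_dataset=True)
#   signals = sampler.sample(np.array([0]), touch=True, vision=True)
#   sampler.disconnect()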
| Active-3D-Vision-and-Touch-main | pterotactyl/simulator/scene/sampler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| Active-3D-Vision-and-Touch-main | pterotactyl/object_data/__init__.py |
Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
import torch
import numpy as np
from torch.nn.parameter import Parameter
import torch.nn.functional as F
# class for the autoencoder
# for extracting latent vector from predicted shape
class AutoEncoder(nn.Module):
def __init__(self, adj_info, inital_positions, args, only_encode=False):
super(AutoEncoder, self).__init__()
self.adj_info = adj_info
self.initial_positions = inital_positions
self.args = args
# feature size passed to the GCN
input_size = 50
self.only_encode = only_encode
self.positional_encoder = Positional_Encoder(input_size)
self.mask_encoder = Mask_Encoder(input_size)
self.encoder = Encoder(input_size, args)
if not self.only_encode:
self.decoder = Decoder(args).cuda()
def forward(self, verts, mask, only_encode=False):
positional_features = self.positional_encoder(verts)
mask_features = self.mask_encoder(mask)
# combine mesh features
vertex_features = positional_features + mask_features
latent = self.encoder(vertex_features, self.adj_info)
if self.only_encode or only_encode:
return latent
pred_points = self.decoder(latent)
return pred_points.permute(0, 2, 1), latent
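# The autoencoder above embeds each vertex with a positional (NeRF-style) encoding plus a learned
# mask token, pools a latent code with a small GCN encoder, and (unless only_encode is set)
# decodes the code back into a dense point cloud with a FoldingNet decoder.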
# encoder for the auto encoder
class Encoder(nn.Module):
def __init__(self, input_features, args):
super(Encoder, self).__init__()
self.num_layers = args.num_GCN_layers
# define output sizes for each GCN layer
hidden_values = [input_features] + [
args.hidden_GCN_size for _ in range(self.num_layers)
]
# define layers
layers = []
for i in range(self.num_layers):
layers.append(
GCN_layer(
hidden_values[i],
hidden_values[i + 1],
args.cut,
do_cut=i < self.num_layers - 1,
)
)
self.layers = nn.ModuleList(layers)
# MLP layers
hidden_values = [args.hidden_GCN_size, 500, 400, 300, args.encoding_size]
num_layers = len(hidden_values) - 1
layers = []
for i in range(num_layers):
if i < num_layers - 1:
layers.append(
nn.Sequential(
nn.Linear(hidden_values[i], hidden_values[i + 1]), nn.ReLU()
)
)
else:
layers.append(
nn.Sequential(nn.Linear(hidden_values[i], hidden_values[i + 1]))
)
self.mlp = nn.Sequential(*layers)
def forward(self, features, adj_info):
adj = adj_info["adj"]
for i in range(self.num_layers):
activation = F.relu if i < self.num_layers - 1 else lambda x: x
features = self.layers[i](features, adj, activation)
features = features.max(dim=1)[0]
features = self.mlp(features)
return features
# Graph convolutional network layer
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, cut=0.33, do_cut=True):
super(GCN_layer, self).__init__()
self.weight = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
self.cut_size = cut
self.do_cut = do_cut
def reset_parameters(self):
stdv = 6.0 / math.sqrt((self.weight.size(1) + self.weight.size(0)))
stdv *= 0.3
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, features, adj, activation):
features = torch.matmul(features, self.weight)
# if we want to only share a subset of features with neighbors
if self.do_cut:
length = round(features.shape[-1] * self.cut_size)
output = torch.matmul(adj, features[:, :, :length])
output = torch.cat((output, features[:, :, length:]), dim=-1)
output[:, :, :length] += self.bias[:length]
else:
output = torch.matmul(adj, features)
output = output + self.bias
return activation(output)
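    # Note: the layer computes activation(A @ (X W) + b); when do_cut is enabled only the first
    # round(cut * out_features) feature channels are aggregated over neighbours via the adjacency
    # matrix, while the remaining channels are kept per-vertex.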
# decoder for the autoencoder
# this is just Foldingnet
class Decoder(nn.Module):
def __init__(self, args, rank=0):
super(Decoder, self).__init__()
self.model = FoldingNetDec(rank=rank)
self.initial = nn.Linear(args.encoding_size, 512)
def forward(self, features):
features = self.initial(features)
points = self.model(features)
return points
# foldingnet definition
class FoldingNetDecFold1(nn.Module):
def __init__(self):
super(FoldingNetDecFold1, self).__init__()
self.conv1 = nn.Conv1d(514, 512, 1)
self.conv2 = nn.Conv1d(512, 512, 1)
self.conv3 = nn.Conv1d(512, 3, 1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.conv3(x)
return x
# foldingnet definition
def GridSamplingLayer(batch_size, meshgrid):
ret = np.meshgrid(*[np.linspace(it[0], it[1], num=it[2]) for it in meshgrid])
ndim = len(meshgrid)
grid = np.zeros(
(np.prod([it[2] for it in meshgrid]), ndim), dtype=np.float32
) # MxD
for d in range(ndim):
grid[:, d] = np.reshape(ret[d], -1)
g = np.repeat(grid[np.newaxis, ...], repeats=batch_size, axis=0)
return g
# foldingnet definition
class FoldingNetDecFold2(nn.Module):
def __init__(self):
super(FoldingNetDecFold2, self).__init__()
self.conv1 = nn.Conv1d(515, 512, 1)
self.conv2 = nn.Conv1d(512, 512, 1)
self.conv3 = nn.Conv1d(512, 3, 1)
self.relu = nn.ReLU()
    def forward(self, x):  # input x = batch,515,80^2
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.conv3(x)
return x
# foldingnet definition
class FoldingNetDec(nn.Module):
def __init__(self, rank=0):
super(FoldingNetDec, self).__init__()
self.rank = rank
self.fold1 = FoldingNetDecFold1()
self.fold2 = FoldingNetDecFold2()
def forward(self, x):
batch_size = x.size(0)
x = torch.unsqueeze(x, 1) # x = batch,1,512
        x = x.repeat(1, 80 ** 2, 1)  # x = batch,80^2,512
        code = x.transpose(2, 1)  # code = batch,512,80^2
        meshgrid = [[-0.5, 0.5, 80], [-0.5, 0.5, 80]]
        grid = GridSamplingLayer(batch_size, meshgrid)  # grid = batch,80^2,2
        grid = torch.from_numpy(grid).cuda(self.rank)
        x = torch.cat((x, grid), 2)  # x = batch,80^2,514
        x = x.transpose(2, 1)  # x = batch,514,80^2
        x = self.fold1(x)  # x = batch,3,80^2
        x = torch.cat((code, x), 1)  # x = batch,515,80^2
        x = self.fold2(x)  # x = batch,3,80^2
return x
# encode the positional information of vertices using Nerf Embeddings
class Positional_Encoder(nn.Module):
def __init__(self, input_size):
super(Positional_Encoder, self).__init__()
layers = []
layers.append(
nn.Linear(63, input_size // 4)
) # 10 nerf layers + original positions
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 4, input_size // 2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 2, input_size))
self.model = nn.Sequential(*layers)
# apply nerf embedding of the positional information
def nerf_embedding(self, points):
embeddings = []
for i in range(10):
if i == 0:
embeddings.append(torch.sin(np.pi * points))
embeddings.append(torch.cos(np.pi * points))
else:
embeddings.append(torch.sin(np.pi * 2 * i * points))
embeddings.append(torch.cos(np.pi * 2 * i * points))
embeddings = torch.cat(embeddings, dim=-1)
return embeddings
def forward(self, positions):
shape = positions.shape
positions = positions.contiguous().view(shape[0] * shape[1], -1)
        # combine the nerf embedding with the original positions
positions = torch.cat((self.nerf_embedding((positions)), positions), dim=-1)
embeding = self.model(positions).view(shape[0], shape[1], -1)
return embeding
# make embedding token of the mask information for each vertex
class Mask_Encoder(nn.Module):
def __init__(self, input_size):
super(Mask_Encoder, self).__init__()
layers_mask = []
layers_mask.append(nn.Embedding(4, input_size))
self.model = nn.Sequential(*layers_mask)
def forward(self, mask):
shape = mask.shape
mask = mask.contiguous().view(-1, 1)
embeding_mask = self.model(mask.long()).view(shape[0], shape[1], -1)
return embeding_mask
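# --- Editor's usage sketch (hedged): the guarded block below is not part of the
# original model file. It only sanity-checks, on CPU, the tensor shapes produced
# by the building blocks defined above; the batch size, vertex count and the
# uniform adjacency matrix are made-up values chosen purely for illustration.
if __name__ == "__main__":
    batch, n_verts, feat = 2, 4, 50
    # NeRF-style positional features from 3D vertex positions
    pos_enc = Positional_Encoder(feat)
    verts = torch.rand(batch, n_verts, 3)
    print(pos_enc(verts).shape)  # expected: (2, 4, 50)
    # mask tokens in {0, 1, 2, 3} embedded per vertex
    mask_enc = Mask_Encoder(feat)
    mask = torch.randint(0, 4, (batch, n_verts, 1)).float()
    print(mask_enc(mask).shape)  # expected: (2, 4, 50)
    # a single GCN layer sharing a third of its features with neighbors
    layer = GCN_layer(feat, 64, cut=0.33, do_cut=True)
    adj = torch.ones(n_verts, n_verts) / n_verts
    print(layer(pos_enc(verts) + mask_enc(mask), adj, F.relu).shape)  # (2, 4, 64)
    # the 2D folding grid consumed by FoldingNetDec
    print(GridSamplingLayer(batch, [[-0.5, 0.5, 80], [-0.5, 0.5, 80]]).shape)  # (2, 6400, 2)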
| Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/autoencoder/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import numpy as np
from tqdm import tqdm
import argparse
import random
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from torch.utils.data import DataLoader
from pterotactyl.reconstruction.autoencoder import model
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
from pterotactyl.reconstruction.vision import model as vision_model
import pterotactyl.objects as objects
from pterotactyl import pretrained
import pterotactyl.object_data as object_data
IMAGE_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "images_colourful/")
class Engine:
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.vision_chart_location = os.path.join(
os.path.dirname(objects.__file__), "vision_charts.obj"
)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.results_dir = os.path.join("results", self.args.exp_type, self.args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
utils.save_config(self.checkpoint_dir, args)
def __call__(self) -> float:
# define the model and optimizer
vision_args, weights = utils.load_model_config(self.args.vision_location)
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
self.deform = vision_model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
).cuda()
self.deform.load_state_dict(torch.load(weights))
self.auto_encoder = model.AutoEncoder(
self.mesh_info, self.initial_mesh, self.args
)
params = list(self.auto_encoder.parameters())
self.auto_encoder.cuda()
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
self.load()
# logging information
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
self.train_loss = 0
# get data
train_loader, valid_loaders = self.get_loaders()
# evaluate on the test set
if self.args.eval:
self.load()
with torch.no_grad():
self.validate(valid_loaders, writer)
return
# train and validate
for epoch in range(0, self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
# get dataloaders
def get_loaders(self):
train_loader, valid_loader = "", ""
# training loader
if not self.args.eval:
train_data = data_loaders.mesh_loader_vision(
self.args, set_type="auto_train"
)
train_loader = DataLoader(
train_data,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=16,
collate_fn=train_data.collate,
)
# evaluation loaders
set_type = "test" if self.args.eval else "valid"
valid_data = data_loaders.mesh_loader_vision(self.args, set_type=set_type)
valid_loader = DataLoader(
valid_data,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=16,
collate_fn=valid_data.collate,
)
return train_loader, valid_loader
def train(self, data, writer):
total_loss = 0
iterations = 0
self.auto_encoder.train()
for k, batch in enumerate(tqdm(data, smoothing=0)):
self.optimizer.zero_grad()
# initialize data
img = batch["img"].cuda()
# inference
with torch.no_grad():
charts = vision_model.prepare_mesh(batch, self.initial_mesh, self.args)
verts, mask = self.deform(img, charts)
pred_points, latent = self.auto_encoder(verts.detach(), mask)
loss = utils.chamfer_distance(
verts.detach(),
self.mesh_info["faces"],
pred_points,
num=self.args.number_points,
)
loss = self.args.loss_coeff * loss.mean()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f"Train || Epoch: {self.epoch}, loss: {loss.item():.2f}, b_ptp: {self.best_loss:.2f}"
tqdm.write(message)
total_loss += loss.item()
iterations += 1.0
self.train_loss = total_loss / iterations
writer.add_scalars(
"train_loss", {self.args.exp_id: total_loss / iterations}, self.epoch
)
def validate(self, valid_loader, writer):
total_loss = 0
self.auto_encoder.eval()
num_examples = 0
latents = []
names = []
for v, batch in enumerate(tqdm(valid_loader)):
self.optimizer.zero_grad()
# initialize data
img = batch["img"].cuda()
batch_size = img.shape[0]
# inference
charts = vision_model.prepare_mesh(batch, self.initial_mesh, self.args)
verts, mask = self.deform(img, charts)
pred_points, latent = self.auto_encoder(verts.detach(), mask)
names += batch["names"]
latents.append(latent)
loss = utils.chamfer_distance(
verts.detach(),
self.mesh_info["faces"],
pred_points,
num=self.args.number_points,
)
loss = self.args.loss_coeff * loss.mean() * batch_size
# logs
num_examples += float(batch_size)
total_loss += loss
total_loss = total_loss / num_examples
message = f"Valid || Epoch: {self.epoch}, train loss: {self.train_loss:.4f}, val loss: {total_loss:.4f}, b_ptp: {self.best_loss:.4f}"
tqdm.write(message)
print("*******************************************************")
print(f"Validation Accuracy: {total_loss}")
print("*******************************************************")
if not self.args.eval:
writer.add_scalars("valid_ptp", {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
if self.args.eval:
latents = torch.cat(latents)
self.cluster(latents, names)
# save the model
def save(self):
torch.save(self.auto_encoder.state_dict(), self.checkpoint_dir + "/model")
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + "/optim")
# load the model
def load(self):
if self.args.eval and self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_p/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_p/"
)
else:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_g/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_g/"
)
else:
if self.args.finger:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_p/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_p/"
)
else:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_g/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_g/"
)
# define the vision model
vision_args, _ = utils.load_model_config(location_vision)
            weights = location_vision + "model"
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
self.deform = vision_model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
)
self.deform.cuda()
self.deform.load_state_dict(torch.load(weights))
self.deform.eval()
# define the autoencoder model
auto_args, _ = utils.load_model_config(location_auto)
            weights = location_auto + "model"
self.auto_encoder = model.AutoEncoder(
self.mesh_info, self.initial_mesh, auto_args
)
self.auto_encoder.cuda()
self.auto_encoder.load_state_dict(torch.load(weights))
else:
try:
self.auto_encoder.load_state_dict(
torch.load(self.checkpoint_dir + "/model")
)
self.optimizer.load_state_dict(
torch.load(self.checkpoint_dir + "/optim")
)
except:
return
# check if current validation is better, and if so save model
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(
f"Saving with {improvement:.3f} improvement in Chamfer Distance on Validation Set "
)
self.best_loss = self.current_loss
self.last_improvement = 0
self.save()
else:
self.last_improvement += 1
if self.last_improvement >= self.args.patience:
print(f"Over {self.args.patience} steps since last imporvement")
print("Exiting now")
exit()
def cluster(self, latents, names):
example_nums = 20
crop = 20
img_dim = 256
examples = random.choices(range(latents.shape[0]), k=example_nums)
collage = Image.new(
"RGB", ((img_dim - crop * 2) * 5, (img_dim - crop * 2) * example_nums)
)
for v, e in enumerate(examples):
new_im = Image.new("RGB", (img_dim * 5, img_dim))
l = latents[e]
main_obj = names[e][0].split("/")[-1]
imgs = [os.path.join(IMAGE_LOCATION, main_obj + ".npy")]
seen = [main_obj]
compare_latents = latents - l.unsqueeze(0)
compare_latents = (compare_latents ** 2).sum(-1)
closest = torch.topk(compare_latents, 25, largest=False)[1][1:]
for c in closest:
obj = names[c][0].split("/")[-1]
if obj in seen:
continue
seen.append(obj)
imgs.append(os.path.join(IMAGE_LOCATION, obj + ".npy"))
for i in range(5):
im = Image.fromarray(np.load(imgs[i]))
new_im.paste(im, (i * img_dim, 0))
new_im.save(f"{self.results_dir}/valid_{v}.png")
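# Editor's note (hedged): a typical invocation of this training script, using only
# flags defined in the argument parser below; the experiment identifiers are made
# up and the module path assumes the pterotactyl package is importable:
#   python -m pterotactyl.reconstruction.autoencoder.train \
#       --use_img --use_touch --exp_type autoencoder --exp_id example_run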
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cut",
type=float,
default=0.33,
help="The shared size of features in the GCN.",
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the deformation prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--encoding_size", type=int, default=200, help="size of the latent vector"
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--lr", type=float, default=0.0003, help="Initial learning rate."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument("--batch_size", type=int, default=16, help="Size of the batch.")
parser.add_argument(
"--val_grasps",
type=int,
default=-1,
help="number of grasps to use during validation.",
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--use_touch",
action="store_true",
default=False,
help="To use the touch information.",
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_GCN_layers",
type=int,
default=20,
help="Number of GCN layers in the mesh deformation network.",
)
parser.add_argument(
"--hidden_GCN_size",
type=int,
default=300,
help="Size of the feature vector for each GCN layer in the mesh deformation network.",
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="load the pretrained model",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/autoencoder/train.py |
Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/touch/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
# CNN block
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, last=False):
super().__init__()
self.last = last
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=5, padding=2, stride=2),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=5, padding=2),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=5, padding=2),
)
self.activation = nn.Sequential(
nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.double_conv(x)
if not self.last:
x = self.activation(x)
return x
# Model for predicting touch chart shape
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
# CNN
CNN_layers = []
CNN_layers.append(DoubleConv(3, 16))
CNN_layers.append(DoubleConv(16, 32))
CNN_layers.append(DoubleConv(32, 32))
CNN_layers.append(DoubleConv(32, 64))
CNN_layers.append(DoubleConv(64, 128))
CNN_layers.append(DoubleConv(128, 128, last=True))
self.CNN_layers = nn.Sequential(*CNN_layers)
# MLP
layers = []
layers.append(nn.Sequential(nn.Linear(512, 256), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(256, 128), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(128, 75)))
self.fc = nn.Sequential(*layers)
def predict_verts(self, touch):
for layer in self.CNN_layers:
touch = layer(touch)
points = touch.contiguous().view(-1, 512)
points = self.fc(points)
return points
    # transform the predicted shape into the reference frame of the sensor
def transform_verts(self, verts, ref):
pos = ref["pos"].cuda().view(-1, 1, 3).repeat(1, verts.shape[1], 1)
rot = ref["rot"].cuda()
verts = torch.bmm(rot, verts.permute(0, 2, 1)).permute(0, 2, 1)
verts += pos
return verts
def forward(self, gel, ref_frame, verts):
verts = verts + self.predict_verts(gel).view(-1, verts.shape[1], 3)
verts = self.transform_verts(verts, ref_frame)
return verts
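# --- Editor's shape-check sketch (hedged): not part of the original file. ---
# Runs only the CNN trunk of the touch-chart encoder on a random CPU tensor that
# stands in for a 121x121 simulated touch image; the values are meaningless and
# only the output shape (25 vertices x 3 offsets per example) is illustrated.
if __name__ == "__main__":
    encoder = Encoder()
    fake_touch = torch.rand(2, 3, 121, 121)
    offsets = encoder.predict_verts(fake_touch).view(2, 25, 3)
    print(offsets.shape)  # expected: (2, 25, 3)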
| Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/touch/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from tqdm import tqdm
import argparse
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from torch.utils.data import DataLoader
from pterotactyl.reconstruction.touch import model
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
import pterotactyl.objects as objects
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
utils.set_seeds(args.seed)
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
utils.save_config(self.checkpoint_dir, args)
chart_location = os.path.join(
os.path.dirname(objects.__file__), "touch_chart.obj"
)
self.verts, self.faces = utils.load_mesh_touch(chart_location)
self.verts = self.verts.view(1, self.verts.shape[0], 3).repeat(
args.batch_size, 1, 1
)
def __call__(self):
self.encoder = model.Encoder()
self.encoder.cuda()
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr)
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
train_loader, valid_loader = self.get_loaders()
# evaluate
if self.args.eval:
self.load()
with torch.no_grad():
self.validate(valid_loader, writer)
return
# train and validate
else:
for epoch in range(self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loader, writer)
self.check_values()
# get the dataloaders
def get_loaders(self):
train_loader, valid_loader = "", ""
# dataloader for training
if not self.args.eval:
train_data = data_loaders.mesh_loader_touch(
self.args, set_type="recon_train"
)
train_loader = DataLoader(
train_data,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=16,
collate_fn=train_data.collate,
)
# dataloader for evaluation
set_type = "test" if self.args.eval else "valid"
valid_data = data_loaders.mesh_loader_touch(self.args, set_type=set_type)
valid_loader = DataLoader(
valid_data,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=16,
collate_fn=valid_data.collate,
)
return train_loader, valid_loader
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data)):
# initialize
self.optimizer.zero_grad()
sim_touch = batch["sim_touch"].cuda()
ref_frame = batch["ref"]
gt_points = batch["samples"].cuda()
batch_size = gt_points.shape[0]
# inference
pred_verts = self.encoder(
sim_touch, ref_frame, self.verts.clone()[:batch_size]
)
loss = self.args.loss_coeff * utils.chamfer_distance(
pred_verts, self.faces, gt_points, self.args.num_samples
)
loss = loss.mean()
total_loss += loss.data.cpu().numpy()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f"Train || Epoch: {self.epoch}, loss: {loss.item():.5f} "
message += f"|| best_loss: {self.best_loss :.5f}"
tqdm.write(message)
iterations += 1.0
writer.add_scalars(
"train", {self.args.exp_id: total_loss / iterations}, self.epoch
)
def validate(self, valid_loader, writer):
total_loss = 0
self.encoder.eval()
num_examples = 0
for k, batch in enumerate(tqdm(valid_loader)):
# initialize data
sim_touch = batch["sim_touch"].cuda()
ref_frame = batch["ref"]
gt_points = batch["samples"].cuda()
batch_size = gt_points.shape[0]
# inference
pred_verts = self.encoder(
sim_touch, ref_frame, self.verts.clone()[:batch_size]
)
            # compute the loss (no backprop during validation)
loss = self.args.loss_coeff * utils.chamfer_distance(
pred_verts, self.faces, gt_points, self.args.num_samples
)
loss = loss.mean()
num_examples += float(batch_size)
total_loss += loss * float(batch_size)
total_loss = total_loss / float(num_examples)
# log
print("*******************************************************")
print(f"Total validation loss: {total_loss}")
print("*******************************************************")
if not self.args.eval:
writer.add_scalars("valid", {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
# save the model
def save(self):
torch.save(self.encoder.state_dict(), self.checkpoint_dir + "/model")
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + "/optim")
# check if the latest validation is better, save if so
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
self.best_loss = self.current_loss
print(f"Saving Model with a {improvement} improvement in point loss")
self.save()
self.last_improvement = 0
else:
self.last_improvement += 1
if self.last_improvement == self.args.patience:
print(f"Over {self.args.patience} steps since last imporvement")
print("Exiting now")
exit()
print("*******************************************************")
# load the model
def load(self):
if self.args.eval and self.args.pretrained:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/touch/best/model"
)
self.encoder.load_state_dict(torch.load(location))
else:
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + "/model"))
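# Editor's note (hedged): one possible way to launch this training script, using
# only flags defined in the argument parser below; the experiment identifiers are
# made up and the module path assumes the pterotactyl package is importable:
#   python -m pterotactyl.reconstruction.touch.train \
#       --exp_type touch --exp_id example_run --batch_size 64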
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="reduces the number of data examples",
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--lr", type=float, default=0.0001, help="Initial learning rate."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument("--batch_size", type=int, default=64, help="Size of the batch.")
parser.add_argument(
"--num_samples",
type=int,
default=4000,
help="Number of points in the predicted point cloud.",
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name"
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group"
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="load the pretrained model",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/touch/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
import torch
import numpy as np
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from PIL import Image
# basic CNN layer template
def CNN_layer(f_in, f_out, k, stride=1, simple=False, padding=1):
layers = []
if not simple:
layers.append(nn.BatchNorm2d(int(f_in)))
layers.append(nn.ReLU(inplace=True))
layers.append(
nn.Conv2d(int(f_in), int(f_out), kernel_size=k, padding=padding, stride=stride)
)
return nn.Sequential(*layers)
# network for making image features for vertex feature vectors
class Image_Encoder(nn.Module):
def __init__(self, args):
super(Image_Encoder, self).__init__()
# CNN sizes
cur_size = 3
next_size = 16
# layers for the CNN
layers = []
layers.append(
CNN_layer(cur_size, cur_size, args.CNN_ker_size, stride=1, simple=True)
)
for i in range(args.num_CNN_blocks):
layers.append(CNN_layer(cur_size, next_size, args.CNN_ker_size, stride=2))
cur_size = next_size
next_size = next_size * 2
for j in range(args.layers_per_block - 1):
layers.append(CNN_layer(cur_size, cur_size, args.CNN_ker_size))
self.args = args
self.layers = nn.ModuleList(layers)
# camera parameters
f = 221.7025
RT = np.array(
[
[
-7.587616579485257e-08,
-1.0000001192092896,
0.0,
-2.2762851159541242e-08,
],
[-0.7071068286895752, 7.587616579485257e-08, -0.7071068286895752, 0.0],
[0.7071068286895752, 0.0, -0.7071067690849304, 0.4242640733718872],
]
)
K = np.array([[f, 0, 128.0], [0, f, 128.0], [0, 0, 1]])
# projection matrix
self.matrix = torch.FloatTensor(K.dot(RT)).cuda()
    # defines image features over vertices from vertex positions and feature maps from vision
def pooling(self, blocks, verts_pos):
# convert vertex positions to x,y coordinates in the image, scaled to fractions of image dimension
ext_verts_pos = torch.cat(
(
verts_pos,
torch.FloatTensor(
np.ones([verts_pos.shape[0], verts_pos.shape[1], 1])
).cuda(),
),
dim=-1,
)
ext_verts_pos = torch.matmul(ext_verts_pos, self.matrix.permute(1, 0))
ext_verts_pos[:, :, 2][ext_verts_pos[:, :, 2] == 0] = 0.1
xs = ext_verts_pos[:, :, 1] / ext_verts_pos[:, :, 2] / 256.0
xs[torch.isinf(xs)] = 0.5
ys = ext_verts_pos[:, :, 0] / ext_verts_pos[:, :, 2] / 256.0
ys[torch.isinf(ys)] = 0.5
full_features = None
xs = xs.unsqueeze(2).unsqueeze(3)
ys = ys.unsqueeze(2).unsqueeze(3)
grid = torch.cat([ys, xs], 3)
grid = grid * 2 - 1
# extract image features based on vertex projected positions
for block in blocks:
features = torch.nn.functional.grid_sample(block, grid, align_corners=True)
if full_features is None:
full_features = features
else:
full_features = torch.cat((full_features, features), dim=1)
vert_image_features = full_features[:, :, :, 0].permute(0, 2, 1)
return vert_image_features
    # Examines the projection of points into image space and displays the image
# This is only for debugging purposes
def debug_pooling(self, img, points):
# convert vertex positions to x,y coordinates in the image, scaled to fractions of image dimension
ext_verts_pos = torch.cat(
(
points,
torch.FloatTensor(
np.ones([points.shape[0], points.shape[1], 1])
).cuda(),
),
dim=-1,
)
ext_verts_pos = torch.matmul(ext_verts_pos, self.matrix.permute(1, 0))
xs = ext_verts_pos[:, :, 1] / ext_verts_pos[:, :, 2] / 256.0
ys = ext_verts_pos[:, :, 0] / ext_verts_pos[:, :, 2] / 256.0
for xses, yses, i in zip(xs, ys, img):
i = (255 * i.permute(1, 2, 0)).data.cpu().numpy().astype(np.uint8)
for x, y in zip(xses, yses):
x = int(x * 255)
if x > 255:
x = 255
if x < 0:
x = 0
y = int(y * 255)
if y > 255:
y = 255
if y < 0:
y = 0
i[x, y, 0] = 255.0
i[x, y, 1] = 0
i[x, y, 2] = 0
Image.fromarray(i).save("debug_img.png")
print("Image of point projection has been saved to debug_img.png")
print("press enter to continue")
input()
print("*" * 15)
print()
exit()
def forward(self, img):
x = img
features = []
# layers to select image features from
layer_selections = [
len(self.layers) - 1 - (i + 1) * self.args.layers_per_block
for i in range(3)
]
for e, layer in enumerate(self.layers):
            # if too many layers are applied, the feature map becomes smaller than the kernel size
if x.shape[-1] < self.args.CNN_ker_size:
break
x = layer(x)
# collect feature maps
if e in layer_selections:
features.append(x)
features.append(x)
return features
# Class for deforming the charts into the target shape
class Deformation(nn.Module):
def __init__(
self, adj_info, inital_positions, args, return_img=False, pass_img=False
):
super(Deformation, self).__init__()
self.adj_info = adj_info
self.initial_positions = inital_positions
self.args = args
self.return_img = return_img
self.pass_img = pass_img
# add image encoder and get image feature size
if args.use_img:
self.img_encoder_global = Image_Encoder(args).cuda()
self.img_encoder_local = Image_Encoder(args).cuda()
with torch.no_grad():
img_features = self.img_encoder_global(
torch.zeros(1, 3, 256, 256).cuda()
)
vert_positions = torch.zeros(1, 1, 3).cuda()
input_size = self.img_encoder_global.pooling(
img_features, vert_positions
).shape[-1]
else:
            # if no image features are used, fix the feature size at 50
input_size = 50
        # add positional and mask encoders and GCN deformation networks
self.positional_encoder = Positional_Encoder(input_size)
self.mask_encoder = Mask_Encoder(input_size)
self.mesh_deform_1 = GCN(
input_size, args, ignore_touch_matrix=args.use_img
).cuda()
self.mesh_deform_2 = GCN(input_size, args).cuda()
def forward(self, img, charts, img_features=None):
# number of vision charts
vc_length = charts["vision_charts"].clone().shape[1]
# get image features
if self.pass_img and img_features is not None:
global_img_features, local_img_features = img_features
elif self.args.use_img:
global_img_features = self.img_encoder_global(img)
local_img_features = self.img_encoder_local(img)
else:
global_img_features, local_img_features = [], []
##### first iteration #####
# if we are using only touch then we need to use touch information immediately
if self.args.use_touch and not self.args.use_img:
# use touch information
vertices = torch.cat(
(charts["vision_charts"].clone(), charts["touch_charts"].clone()), dim=1
)
mask = torch.cat(
(charts["vision_masks"].clone(), charts["touch_masks"].clone()), dim=1
)
positional_features = self.positional_encoder(vertices)
mask_features = self.mask_encoder(mask)
vertex_features = positional_features + mask_features
        # in all other settings we only use vision
else:
vertices = charts["vision_charts"].clone()
mask = charts["vision_masks"].clone()
positional_features = self.positional_encoder(vertices)
mask_features = self.mask_encoder(mask)
vertex_features = positional_features + mask_features
# use vision information
if self.args.use_img:
img_features = self.img_encoder_global.pooling(
global_img_features, vertices
)
vertex_features += img_features
        # perform the first deformation
update = self.mesh_deform_1(vertex_features, self.adj_info)
# update positions of vision charts only
vertices[:, :vc_length] = vertices[:, :vc_length] + update[:, :vc_length]
##### second loop #####
# add touch information if not already present
if self.args.use_touch and self.args.use_img:
vertices = torch.cat((vertices, charts["touch_charts"].clone()), dim=1)
mask = torch.cat(
(charts["vision_masks"].clone(), charts["touch_masks"].clone()), dim=1
)
mask_features = self.mask_encoder(mask)
positional_features = self.positional_encoder(vertices)
vertex_features = positional_features + mask_features
# add image information
if self.args.use_img:
img_features = self.img_encoder_global.pooling(local_img_features, vertices)
vertex_features += img_features
        # perform the second deformation
update = self.mesh_deform_2(vertex_features, self.adj_info)
# update positions of vision charts only
vertices[:, :vc_length] = vertices[:, :vc_length] + update[:, :vc_length]
##### third loop #####
positional_features = self.positional_encoder(vertices)
mask_features = self.mask_encoder(mask)
vertex_features = positional_features + mask_features
if self.args.use_img:
img_features = self.img_encoder_global.pooling(local_img_features, vertices)
vertex_features += img_features
        # perform the third deformation
update = self.mesh_deform_2(vertex_features, self.adj_info)
# update positions of vision charts only
vertices[:, :vc_length] = vertices[:, :vc_length] + update[:, :vc_length]
if self.return_img:
return vertices, mask, [global_img_features, local_img_features]
return vertices, mask
# Graph convolutional network class for predicting mesh deformation
class GCN(nn.Module):
def __init__(self, input_features, args, ignore_touch_matrix=False):
super(GCN, self).__init__()
#
self.ignore_touch_matrix = ignore_touch_matrix
self.num_layers = args.num_GCN_layers
# define output sizes for each GCN layer
hidden_values = (
[input_features]
+ [args.hidden_GCN_size for _ in range(self.num_layers - 1)]
+ [3]
)
# define layers
layers = []
for i in range(self.num_layers):
layers.append(
GCN_layer(
hidden_values[i],
hidden_values[i + 1],
args.cut,
do_cut=i < self.num_layers - 1,
)
)
self.layers = nn.ModuleList(layers)
def forward(self, features, adj_info):
if self.ignore_touch_matrix:
adj = adj_info["origional"]
else:
adj = adj_info["adj"]
# iterate through GCN layers
for i in range(self.num_layers):
activation = F.relu if i < self.num_layers - 1 else lambda x: x
features = self.layers[i](features, adj, activation)
if torch.isnan(features).any():
print(features)
print("here", i, self.num_layers)
input()
return features
# Graph convolutional network layer
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, cut=0.33, do_cut=True):
super(GCN_layer, self).__init__()
self.weight = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
self.cut_size = cut
self.do_cut = do_cut
def reset_parameters(self):
stdv = 6.0 / math.sqrt((self.weight.size(1) + self.weight.size(0)))
stdv *= 0.3
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, features, adj, activation):
features = torch.matmul(features, self.weight)
        # if we want to only share a subset of features with neighbors
if self.do_cut:
length = round(features.shape[-1] * self.cut_size)
output = torch.matmul(adj, features[:, :, :length])
output = torch.cat((output, features[:, :, length:]), dim=-1)
output[:, :, :length] += self.bias[:length]
else:
output = torch.matmul(adj, features)
output = output + self.bias
return activation(output)
# encode the positional information of vertices using Nerf Embeddings
class Positional_Encoder(nn.Module):
def __init__(self, input_size):
super(Positional_Encoder, self).__init__()
layers = []
layers.append(
nn.Linear(63, input_size // 4)
) # 10 nerf layers + original positions
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 4, input_size // 2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 2, input_size))
self.model = nn.Sequential(*layers)
# apply nerf embedding of the positional information
def nerf_embedding(self, points):
embeddings = []
for i in range(10):
if i == 0:
embeddings.append(torch.sin(np.pi * points))
embeddings.append(torch.cos(np.pi * points))
else:
embeddings.append(torch.sin(np.pi * 2 * i * points))
embeddings.append(torch.cos(np.pi * 2 * i * points))
embeddings = torch.cat(embeddings, dim=-1)
return embeddings
def forward(self, positions):
shape = positions.shape
positions = positions.contiguous().view(shape[0] * shape[1], -1)
        # combine the nerf embedding with the original positions
positions = torch.cat((self.nerf_embedding((positions)), positions), dim=-1)
embeding = self.model(positions).view(shape[0], shape[1], -1)
return embeding
# make embedding token of the mask information for each vertex
class Mask_Encoder(nn.Module):
def __init__(self, input_size):
super(Mask_Encoder, self).__init__()
layers_mask = []
layers_mask.append(nn.Embedding(4, input_size))
self.model = nn.Sequential(*layers_mask)
def forward(self, mask):
shape = mask.shape
mask = mask.contiguous().view(-1, 1)
embeding_mask = self.model(mask.long()).view(shape[0], shape[1], -1)
return embeding_mask
# takes as input the touch information and makes it part of the input mesh
def prepare_mesh(batch, vision_mesh, args):
s1 = batch["img"].shape[0]
if args.use_touch:
touch_info = batch["touch_charts"].cuda().view(s1, -1, 4)
touch_charts = touch_info[:, :, :3]
touch_masks = touch_info[:, :, 3:]
# combine vision charts into a single mesh
vision_charts = vision_mesh.unsqueeze(0).repeat(s1, 1, 1)
vision_masks = 3 * torch.ones(vision_charts.shape[:-1]).cuda().unsqueeze(-1)
charts = {
"touch_charts": touch_charts,
"vision_charts": vision_charts,
"touch_masks": touch_masks,
"vision_masks": vision_masks,
}
else:
# combine vision charts into a single mesh
vision_charts = vision_mesh.unsqueeze(0).repeat(s1, 1, 1)
vision_masks = 3 * torch.ones(vision_charts.shape[:-1]).cuda().unsqueeze(-1)
charts = {"vision_charts": vision_charts, "vision_masks": vision_masks}
return charts
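# --- Editor's shape-check sketch (hedged): not part of the original file. ---
# Applies a single CNN block from above to random CPU noise. The kernel size,
# stride and padding here are made-up illustrative values (they are not the
# exact configuration Image_Encoder builds from its command-line arguments).
if __name__ == "__main__":
    block = CNN_layer(3, 16, k=5, stride=2, padding=2)
    fake_img = torch.rand(2, 3, 256, 256)
    print(block(fake_img).shape)  # expected: (2, 16, 128, 128)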
| Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/vision/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import numpy as np
from tqdm import tqdm
import argparse
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from torch.utils.data import DataLoader
from submitit.helpers import Checkpointable
from pterotactyl.reconstruction.vision import model
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
import pterotactyl.objects as objects
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.vision_chart_location = os.path.join(
os.path.dirname(objects.__file__), "vision_charts.obj"
)
self.results_dir = os.path.join("results", args.exp_type, args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
if not self.args.eval:
utils.save_config(self.checkpoint_dir, args)
def __call__(self) -> float:
# compute mesh statistics
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
self.args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
# define the model and optimizer
self.encoder = model.Deformation(self.mesh_info, self.initial_mesh, self.args)
self.encoder.cuda()
if not self.args.eval:
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
# logging information
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
# get data
train_loader, valid_loaders = self.get_loaders()
        # evaluate on the test set
if self.args.eval:
self.load()
with torch.no_grad():
self.validate(valid_loaders, writer)
return
# train and validate
else:
self.load()
for epoch in range(self.epoch, self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
# get dataloaders
def get_loaders(self):
train_loader, valid_loader = "", ""
if not self.args.eval:
# training dataloader
train_data = data_loaders.mesh_loader_vision(
self.args, set_type="recon_train"
)
train_loader = DataLoader(
train_data,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=16,
collate_fn=train_data.collate,
)
        # evaluation dataloader
set_type = "test" if self.args.eval else "valid"
valid_data = data_loaders.mesh_loader_vision(self.args, set_type=set_type)
valid_loader = DataLoader(
valid_data,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=16,
collate_fn=valid_data.collate,
)
return train_loader, valid_loader
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data, smoothing=0)):
self.optimizer.zero_grad()
# initialize data
img = batch["img"].cuda()
gt_points = batch["gt_points"].cuda()
# for debugging , if you want to change the camera view, reach out to [email protected]
# self.encoder.img_encoder_global.debug_pooling(img, gt_points)
# self.encoder.img_encoder_global.debug_pooling(img, self.initial_mesh.unsqueeze(0).repeat(img.shape[0], 1, 1))
# inference
with torch.no_grad():
charts = model.prepare_mesh(batch, self.initial_mesh, self.args)
verts = self.encoder(img, charts)[0]
loss = utils.chamfer_distance(
verts, self.mesh_info["faces"], gt_points, num=self.args.number_points
)
loss = self.args.loss_coeff * loss.mean()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f"Train || Epoch: {self.epoch}, loss: {loss.item():.2f}, b_ptp: {self.best_loss:.2f}"
tqdm.write(message)
total_loss += loss.item()
iterations += 1.0
writer.add_scalars(
"train_loss", {self.args.exp_id: total_loss / iterations}, self.epoch
)
def validate(self, valid_loader, writer):
total_loss = 0
self.encoder.eval()
num_examples = 0
observations = []
names = []
for v, batch in enumerate(tqdm(valid_loader)):
# initialize data
names += batch["names"]
img = batch["img"].cuda()
gt_points = batch["gt_points"].cuda()
batch_size = img.shape[0]
# inference
charts = model.prepare_mesh(batch, self.initial_mesh, self.args)
ps = list(self.encoder.parameters())
ps = torch.cat([p.flatten() for p in ps])
verts = self.encoder(img, charts)[0]
observations.append(verts)
loss = utils.chamfer_distance(
verts, self.mesh_info["faces"], gt_points, num=self.args.number_points
)
loss = self.args.loss_coeff * loss.sum()
# logs
num_examples += float(batch_size)
total_loss += loss
message = f"Valid || Epoch: {self.epoch}, ave: {total_loss / num_examples:.4f}, b_ptp: {self.best_loss:.2f}"
tqdm.write(message)
if self.args.visualize and v == 5 and self.args.eval:
meshes = torch.cat(observations, dim=0)[:, :, :3]
names = [n[0] for n in names]
utils.visualize_prediction(
self.results_dir, meshes, self.mesh_info["faces"], names
)
total_loss = total_loss / num_examples
print("*******************************************************")
print(f"Validation Accuracy: {total_loss}")
print("*******************************************************")
if not self.args.eval:
writer.add_scalars("valid_ptp", {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
# save the model
def save(self):
torch.save(self.encoder.state_dict(), self.checkpoint_dir + "/model")
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + "/optim")
np.save(self.checkpoint_dir + "/epoch.npy", np.array([self.epoch + 1]))
# load the model
def load(self):
if self.args.eval and self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_p/"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_g/"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_p/"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_g/"
)
vision_args, _ = utils.load_model_config(location)
            weights = location + "model"
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
# define the model and optimizer
self.encoder = model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
)
self.encoder.cuda()
self.encoder.load_state_dict(torch.load(weights))
else:
try:
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + "/model"))
self.optimizer.load_state_dict(
torch.load(self.checkpoint_dir + "/optim")
)
self.epoch = np.load(self.checkpoint_dir + "/epoch.npy")[0]
except:
return
# check if the latest validation beats the previous, and save model if so
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(
f"Saving with {improvement:.3f} improvement in Chamfer Distance on Validation Set "
)
self.best_loss = self.current_loss
self.last_improvement = 0
self.save()
else:
self.last_improvement += 1
if self.last_improvement >= self.args.patience:
print(f"Over {self.args.patience} steps since last imporvement")
print("Exiting now")
exit()
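# Editor's note (hedged): an example launch of this training script, using only
# flags defined in the argument parser below; the experiment identifiers are made
# up and the module path assumes the pterotactyl package is importable:
#   python -m pterotactyl.reconstruction.vision.train \
#       --use_img --use_touch --exp_type vision --exp_id example_run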
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cut",
type=float,
default=0.33,
help="The shared size of features in the GCN.",
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--lr", type=float, default=0.0003, help="Initial learning rate."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument("--batch_size", type=int, default=16, help="Size of the batch.")
parser.add_argument(
"--val_grasps",
type=int,
default=-1,
help="number of grasps to use during validation.",
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--use_touch",
action="store_true",
default=False,
help="To use the touch information.",
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_CNN_blocks",
type=int,
default=6,
help="Number of image blocks in the CNN.",
)
parser.add_argument(
"--layers_per_block",
type=int,
default=3,
help="Number of image layers in each block in the CNN.",
)
parser.add_argument(
"--CNN_ker_size",
type=int,
default=5,
help="Size of the image kernel in each CNN layer.",
)
parser.add_argument(
"--num_GCN_layers",
type=int,
default=20,
help="Number of GCN layers in the mesh deformation network.",
)
parser.add_argument(
"--hidden_GCN_size",
type=int,
default=300,
help="Size of the feature vector for each GCN layer in the mesh deformation network.",
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="load the pretrained model",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
| Active-3D-Vision-and-Touch-main | pterotactyl/reconstruction/vision/train.py |
Active-3D-Vision-and-Touch-main | pterotactyl/pretrained/__init__.py |
|
Active-3D-Vision-and-Touch-main | pterotactyl/utility/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from glob import glob
from tqdm import tqdm
import numpy as np
import torch
from torchvision import transforms
import pterotactyl.objects as objects
import pterotactyl.object_data as object_data
POINT_CLOUD_LOCATION = os.path.join(
os.path.dirname(object_data.__file__), "point_cloud_info/"
)
GRASP_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "grasp_info/")
TOUCH_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "touch_charts/")
IMAGE_LOCATION = os.path.join(
os.path.dirname(object_data.__file__), "images_colourful/"
)
DATA_SPLIT = np.load(
os.path.join(os.path.dirname(objects.__file__), "data_split.npy"), allow_pickle=True
).item()
OBJ_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "object_info/")
preprocess = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
def get_finger_transforms(obj, grasp, finger):
ref_location = os.path.join(
GRASP_LOCATION, obj, str(grasp), f"{finger}_ref_frame.npy"
)
touch_info = np.load(ref_location, allow_pickle=True).item()
rot = touch_info["rot"]
pos = touch_info["pos"]
return torch.FloatTensor(rot), torch.FloatTensor(pos)
# class used for obtaining an instance of the dataset for training touch chart prediction
# to be passed to a pytorch dataloader
class mesh_loader_touch(object):
def __init__(self, args, set_type="train"):
# initialization of data locations
self.args = args
self.set_type = set_type
object_names = [
f.split("/")[-1].split(".")[0] for f in glob(f"{IMAGE_LOCATION}/*.npy")
]
self.object_names = []
if self.args.limit_data:
random.shuffle(object_names)
object_names = object_names[:3000]
for n in tqdm(object_names):
if os.path.exists(POINT_CLOUD_LOCATION + n + ".npy"):
if os.path.exists(GRASP_LOCATION + n):
if n in DATA_SPLIT[self.set_type]:
successful_touches = glob(
os.path.join(GRASP_LOCATION, n, "*", "*_touch.npy")
)
if self.args.limit_data:
random.shuffle(successful_touches)
successful_touches = successful_touches[:7]
for touch in successful_touches:
grasp_number = touch.split("/")[-2]
finger_number = touch.split("/")[-1].split("_")[0]
self.object_names.append([n, grasp_number, finger_number])
print(f"The number of {set_type} set objects found : {len(self.object_names)}")
def __len__(self):
return len(self.object_names)
def standerdize_point_size(self, points):
np.random.shuffle(points)
points = torch.FloatTensor(points)
while points.shape[0] < self.args.num_samples:
points = torch.cat((points, points, points, points))
perm = torch.randperm(points.shape[0])
idx = perm[: self.args.num_samples]
return points[idx]
def __getitem__(self, index):
object_name, grasp, finger = self.object_names[index]
# meta data
data = {}
data["names"] = object_name, grasp, finger
        # hand information
data["rot"], data["pos"] = get_finger_transforms(object_name, grasp, finger)
# simulated touch information
touch = np.load(
os.path.join(GRASP_LOCATION, object_name, grasp, f"{finger}_touch.npy")
)
data["sim_touch"] = (
torch.FloatTensor(touch).permute(2, 0, 1).contiguous().view(3, 121, 121)
/ 255.0
)
# point cloud information
points = np.load(
os.path.join(GRASP_LOCATION, object_name, grasp, f"{finger}_points.npy")
)
data["samples"] = self.standerdize_point_size(points)
return data
def collate(self, batch):
data = {}
data["names"] = [item["names"] for item in batch]
data["samples"] = torch.cat([item["samples"].unsqueeze(0) for item in batch])
data["sim_touch"] = torch.cat(
[item["sim_touch"].unsqueeze(0) for item in batch]
)
data["ref"] = {}
data["ref"]["rot"] = torch.cat([item["rot"].unsqueeze(0) for item in batch])
data["ref"]["pos"] = torch.cat([item["pos"].unsqueeze(0) for item in batch])
return data
# class used for obtaining an instance of the dataset for training chart deformation
# to be passed to a pytorch dataloader
class mesh_loader_vision(object):
def __init__(self, args, set_type="train"):
# initialization of data locations
self.args = args
self.set_type = set_type
object_names = [
f.split("/")[-1].split(".")[0] for f in glob(f"{IMAGE_LOCATION}/*.npy")
]
if self.set_type == "recon_train" or self.set_type == "auto_train":
self.get_instance = self.get_training_instance
else:
self.get_instance = self.get_validation_instance
self.object_names = []
        # for debugging, use less data
if args.limit_data:
random.Random(0).shuffle(object_names)
object_names = object_names[:2000]
seed = 0
for n in tqdm(object_names):
if os.path.exists(POINT_CLOUD_LOCATION + n + ".npy"):
if os.path.exists(TOUCH_LOCATION + n):
if n in DATA_SPLIT[self.set_type]:
iters = (
1
if (
self.set_type == "recon_train"
or self.set_type == "auto_train"
)
else 5
)
for _ in range(iters):
self.object_names.append([n, seed])
seed += 1
print(f"The number of {set_type} set objects found : {len(self.object_names)}")
def __len__(self):
return len(self.object_names)
def get_training_instance(self, index):
obj, seed = random.choice(self.object_names)
num_grasps_choice = random.choice(range(0, self.args.num_grasps + 1))
grasp_choices = [i for i in range(50)]
random.shuffle(grasp_choices)
grasps = grasp_choices[:num_grasps_choice]
return obj, grasps
def get_validation_instance(self, index):
obj, seed = self.object_names[index]
grasp_choices = [i for i in range(50)]
if self.args.val_grasps >= 0 and self.args.eval:
num_grasps_choice = self.args.val_grasps
else:
num_grasps_choice = random.Random(seed).choice(
range(0, self.args.num_grasps + 1)
)
random.Random(seed).shuffle(grasp_choices)
grasps = grasp_choices[:num_grasps_choice]
return obj, grasps
# load object point cloud
def get_points(self, obj):
point_location = os.path.join(POINT_CLOUD_LOCATION, obj + ".npy")
samples = np.load(point_location)
np.random.shuffle(samples)
gt_points = torch.FloatTensor(samples[: self.args.number_points])
return gt_points
# load image of object
def get_image(self, obj):
img = torch.empty((1))
if self.args.use_img:
img_location = os.path.join(IMAGE_LOCATION, obj + ".npy")
img = torch.FloatTensor(np.load(img_location)).permute(2, 0, 1) / 255.0
return torch.FloatTensor(img)
    # load touch information from the object
def get_touch_info(self, obj, grasps):
touch_charts = torch.ones((1))
if self.args.use_touch:
remaining = self.args.num_grasps - len(grasps)
all_touch_charts = torch.FloatTensor(
np.load(TOUCH_LOCATION + obj + "/touch_charts.npy")
).view(50, 4, 25, 4)
if self.args.finger:
touch_charts = all_touch_charts[grasps][:, 1]
touch_charts = torch.cat((touch_charts, torch.zeros(remaining, 25, 4)))
else:
touch_charts = all_touch_charts[grasps]
touch_charts = torch.cat(
(touch_charts, torch.zeros(remaining, 4, 25, 4))
)
return touch_charts
def __getitem__(self, index):
obj, grasps = self.get_instance(index)
data = {}
# meta data
data["names"] = OBJ_LOCATION + obj, grasps
# load sampled ground truth points
data["gt_points"] = self.get_points(obj)
# load images
data["img"] = self.get_image(obj)
# get touch information
data["touch_charts"] = self.get_touch_info(obj, grasps)
return data
def collate(self, batch):
data = {}
data["names"] = [item["names"] for item in batch]
data["gt_points"] = torch.cat(
[item["gt_points"].unsqueeze(0) for item in batch]
)
data["img"] = torch.cat([item["img"].unsqueeze(0) for item in batch])
data["touch_charts"] = torch.cat(
[item["touch_charts"].unsqueeze(0) for item in batch]
)
return data
# class used for obtaining an instance of the dataset for training active (RL) exploration
# to be passed to a pytorch dataloader
class mesh_loader_active(object):
def __init__(self, args, set_type="RL_train"):
# initialization of data locations
self.args = args
self.set_type = set_type
object_names = [
f.split("/")[-1].split(".")[0] for f in glob(f"{IMAGE_LOCATION}/*.npy")
]
self.object_names = []
        # for debugging, use less data
if args.limit_data:
random.Random(0).shuffle(object_names)
object_names = object_names[:400]
for n in tqdm(object_names):
if os.path.exists(POINT_CLOUD_LOCATION + n + ".npy"):
if n in DATA_SPLIT[self.set_type]:
self.object_names.append(n)
print(f"The number of {set_type} set objects found : {len(self.object_names)}")
def __len__(self):
return (
len(self.object_names) // self.args.env_batch_size
) * self.args.env_batch_size
def get_instance(self, index):
obj = self.object_names[index]
num_grasps_choice = random.choice(range(0, self.args.num_grasps + 1))
grasp_choices = [i for i in range(50)]
random.shuffle(grasp_choices)
grasps = grasp_choices[:num_grasps_choice]
return obj, grasps
# load object point cloud
def get_points(self, obj):
point_location = os.path.join(POINT_CLOUD_LOCATION, obj + ".npy")
samples = np.load(point_location)
np.random.shuffle(samples)
gt_points = torch.FloatTensor(samples[: self.args.number_points])
return gt_points
# load image of object
def get_image(self, obj):
img = torch.empty((1))
if self.args.use_img:
img_location = os.path.join(IMAGE_LOCATION, obj + ".npy")
img = torch.FloatTensor(np.load(img_location)).permute(2, 0, 1) / 255.0
return torch.FloatTensor(img)
def __getitem__(self, index):
obj = self.object_names[index]
data = {}
# meta data
data["names"] = OBJ_LOCATION + obj
# load sampled ground truth points
data["gt_points"] = self.get_points(obj)
# load images
data["img"] = self.get_image(obj)
return data
def collate(self, batch):
data = {}
data["names"] = [item["names"] for item in batch]
data["gt_points"] = torch.cat(
[item["gt_points"].unsqueeze(0) for item in batch]
)
data["img"] = torch.cat([item["img"].unsqueeze(0) for item in batch])
return data
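# A minimal usage sketch (hedged; `args` is a hypothetical argument namespace carrying the
# fields referenced above) showing how these loaders are typically wrapped in a standard
# PyTorch DataLoader together with their custom collate function:
#   loader = mesh_loader_active(args, set_type="RL_train")
#   train_loader = torch.utils.data.DataLoader(
#       loader, batch_size=args.env_batch_size, shuffle=True, collate_fn=loader.collate
#   )
#   for batch in train_loader:
#       imgs, gt_points = batch["img"], batch["gt_points"]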
| Active-3D-Vision-and-Touch-main | pterotactyl/utility/data_loaders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import os
from PIL import Image
import math
import json
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
import xml.etree.ElementTree as ET
from scipy import ndimage
from collections import namedtuple
from pytorch3d.loss import chamfer_distance as cuda_cd
from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals
from pytorch3d.ops.sample_points_from_meshes import _rand_barycentric_coords
from pytorch3d.io.obj_io import load_obj, save_obj
from pterotactyl.utility import pretty_render
import pterotactyl.objects as objects
def load_mesh_vision(args, obj):
# load obj file
verts, faces = load_mesh_touch(obj)
    # get adjacency matrix information
adj_info = adj_init(verts, faces, args)
return adj_info, verts
# set seeds for consistency
def set_seeds(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
# normalizes a symmetric, binary adjacency matrix so that each row sums to 1
def normalize_adj(mx):
rowsum = mx.sum(1)
r_inv = (1. / rowsum).view(-1)
r_inv[r_inv != r_inv] = 0.
mx = torch.mm(torch.eye(r_inv.shape[0]).to(mx.device) * r_inv, mx)
return mx
# defines the adjacency matrix for an object
def adj_init(verts, faces, args):
# get generic adjacency matrix for vision charts
adj = calc_adj(faces)
adj_info = {}
adj_info['origional'] = normalize_adj(adj.clone())
# this combines the adjacency information of touch and vision charts
# the output adj matrix has the first k rows corresponding to vision charts, and the last |V| - k
    # corresponding to touch charts. Similarly, the first l faces correspond to vision charts, and the
# remaining correspond to touch charts
if args.use_touch:
adj, faces = adj_fuse_touch(verts, faces, adj, args)
adj = normalize_adj(adj)
adj_info['adj'] = adj
adj_info['faces'] = faces
return adj_info
# combines graph for vision and touch charts to define a fused adjacency matrix
def adj_fuse_touch(verts, faces, adj, args):
verts = verts.data.cpu().numpy()
hash = {}
number_of_grasps = args.num_grasps
# find vertices which have the same 3D position
for e, v in enumerate(verts):
if v.tobytes() in hash:
hash[v.tobytes()].append(e)
else:
hash[v.tobytes()] = [e]
# load object information for generic touch chart
if args.use_touch:
chart_location = os.path.join(
os.path.dirname(objects.__file__), "touch_chart.obj"
)
sheet_verts, sheet_faces = load_mesh_touch(chart_location)
sheet_adj = calc_adj(sheet_faces)
# central vertex for each touch chart that will communicate with all vision charts
central_point = 4
fingers = 1 if args.finger else 4
central_points = [central_point + (i * sheet_adj.shape[0]) + adj.shape[0] for i in
range(fingers * number_of_grasps)]
# define and fill new adjacency matrix with vision and touch charts
new_dim = adj.shape[0] + (fingers * number_of_grasps * sheet_adj.shape[0])
new_adj = torch.zeros((new_dim, new_dim)).cuda()
new_adj[: adj.shape[0], :adj.shape[0]] = adj.clone()
for i in range(fingers * number_of_grasps):
start = adj.shape[0] + (sheet_adj.shape[0] * i)
end = adj.shape[0] + (sheet_adj.shape[0] * (i + 1))
new_adj[start: end, start:end] = sheet_adj.clone()
adj = new_adj
# define new faces with vision and touch charts
all_faces = [faces]
for i in range(fingers * number_of_grasps):
temp_sheet_faces = sheet_faces.clone() + verts.shape[0]
temp_sheet_faces += i * sheet_verts.shape[0]
all_faces.append(temp_sheet_faces)
faces = torch.cat(all_faces)
# update adjacency matrix to allow communication between vision and touch charts
for key in hash.keys():
cur_verts = hash[key]
if len(cur_verts) > 1:
for v1 in cur_verts:
for v2 in cur_verts: # vertices on the boundary of vision charts can communicate
adj[v1, v2] = 1
if args.use_touch:
for c in central_points: # touch and vision charts can communicate
adj[v1, c] = 1
adj[c, v1] = 1
return adj, faces
# computes the adjacency matrix from face information
def calc_adj(faces):
v1 = faces[:, 0]
v2 = faces[:, 1]
v3 = faces[:, 2]
num_verts = int(faces.max())
adj = torch.eye(num_verts + 1).to(faces.device)
adj[(v1, v2)] = 1
adj[(v1, v3)] = 1
adj[(v2, v1)] = 1
adj[(v2, v3)] = 1
adj[(v3, v1)] = 1
adj[(v3, v2)] = 1
return adj
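# Worked example for calc_adj/normalize_adj above (illustrative only, on CPU): a single
# triangle with faces = [[0, 1, 2]] yields a 3x3 all-ones matrix from calc_adj (identity
# self-loops plus both directions of every edge), and normalize_adj then rescales it so
# that every entry equals 1/3 and each row sums to 1.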
# sample points from a batch of meshes
def batch_sample(verts, faces, num=10000):
# Pytorch3D based code
bs = verts.shape[0]
face_dim = faces.shape[0]
vert_dim = verts.shape[1]
    # following the PyTorch3D convention, shift faces to correctly index the flattened vertices
F = faces.unsqueeze(0).repeat(bs, 1, 1)
F += vert_dim * torch.arange(0, bs).unsqueeze(-1).unsqueeze(-1).to(F.device)
# flatten vertices and faces
F = F.reshape(-1, 3)
V = verts.reshape(-1, 3)
with torch.no_grad():
areas, _ = mesh_face_areas_normals(V, F)
Ar = areas.reshape(bs, -1)
Ar[Ar != Ar] = 0
Ar = torch.abs(Ar / Ar.sum(1).unsqueeze(1))
Ar[Ar != Ar] = 1
sample_face_idxs = Ar.multinomial(num, replacement=True)
sample_face_idxs += face_dim * torch.arange(0, bs).unsqueeze(-1).to(Ar.device)
# Get the vertex coordinates of the sampled faces.
face_verts = V[F]
v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]
# Randomly generate barycentric coords.
w0, w1, w2 = _rand_barycentric_coords(bs, num, V.dtype, V.device)
# Use the barycentric coords to get a point on each sampled face.
A = v0[sample_face_idxs] # (N, num_samples, 3)
B = v1[sample_face_idxs]
C = v2[sample_face_idxs]
samples = w0[:, :, None] * A + w1[:, :, None] * B + w2[:, :, None] * C
return samples
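# Note on batch_sample above (hedged): each sampled point is a convex combination
# w0 * A + w1 * B + w2 * C of the vertices of a face drawn with probability proportional
# to its area, so for a batch of B meshes the result is a (B, num, 3) tensor of points
# lying on the corresponding mesh surfaces.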
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# loads the initial mesh and returns vertex, and face information
def load_mesh_touch(obj):
obj_info = load_obj(obj)
verts = obj_info[0]
faces = obj_info[1].verts_idx
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(faces).cuda()
return verts, faces
# returns the chamfer distance between a mesh and a point cloud
def chamfer_distance(verts, faces, gt_points, num=1000, repeat=3):
pred_points= batch_sample(verts, faces, num=num)
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
if repeat > 1:
cds = [cd]
for i in range(repeat - 1):
pred_points = batch_sample(verts, faces, num=num)
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
cds.append(cd)
cds = torch.stack(cds)
cd = cds.mean(dim=0)
return cd
# saves a point cloud as a .obj file
def save_points(file, points):
location = f'{file}.obj'
try:
write_obj(location, points.data.cpu().numpy(), [])
except:
write_obj(location, points, [])
# converts a voxel object to a point cloud
def extract_surface(voxel):
conv_filter = torch.ones((1, 1, 3, 3, 3)).cuda()
local_occupancy = F.conv3d(voxel.unsqueeze(
0).unsqueeze(0), conv_filter, padding=1)
local_occupancy = local_occupancy.squeeze(0).squeeze(0)
# only elements with exposed faces
surface_positions = (local_occupancy < 27) * (local_occupancy > 0)
points = torch.where(surface_positions)
points = torch.stack(points)
points = points.permute(1, 0)
return points.type(torch.cuda.FloatTensor)
# saves a mesh as an .obj file
def write_obj(filename, verts, faces):
""" write the verts and faces on file."""
with open(filename, 'w') as f:
# write vertices
f.write('g\n# %d vertex\n' % len(verts))
for vert in verts:
f.write('v %f %f %f\n' % tuple(vert))
# write faces
f.write('# %d faces\n' % len(faces))
for face in faces:
f.write('f %d %d %d\n' % tuple(face))
# makes the sphere of actions
class get_circle(object):
def __init__(self, num_points, rank=0):
action_position = []
a = 4 * np.pi / float(num_points)
d = math.sqrt(a)
M_t = round(np.pi / d)
d_t = np.pi / M_t
d_phi = a / d_t
sphere_positions = []
for i in range(0, M_t):
theta = np.pi * (i + .5) / M_t
M_phi = round(2 * np.pi * math.sin(theta) / d_phi)
for j in range(0, M_phi):
phi = 2 * np.pi * j / M_phi
point = self.get_point(theta, phi)
sphere_positions.append([theta, phi])
action_position.append(point)
self.points = torch.stack(action_position)
self.sphere_points = sphere_positions
if num_points != self.points.shape[0]:
print(f' we have {self.points.shape} points but want {num_points}')
exit()
def get_point(self, a, b):
x = math.sin(a) * math.cos(b)
y = math.sin(a) * math.sin(b)
z = math.cos(a)
return torch.FloatTensor([x, y, z])
# get the normal of a 3D triangle
def normal_from_triangle(a, b, c):
A = b - a
B = c - a
normal = np.cross(A, B)
normal = normalize_vector(normal.reshape(1, 1, 3))
return normal.reshape(3)
# normalizes a vector
def normalize_vector(vector):
n = np.linalg.norm(vector, axis=2)
vector[:, :, 0] /= n
vector[:, :, 1] /= n
vector[:, :, 2] /= n
return vector
# computes the quaternion that rotates vec1 onto vec2
def quats_from_vectors(vec1, vec2):
vec1 = np.array(vec1)
vec2 = np.array(vec2)
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
if s == 0:
s = 1
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
quat = R.from_matrix(rotation_matrix).as_quat()
return quat
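# Illustrative check for quats_from_vectors above (not part of the pipeline): rotating the
# x axis onto the y axis is a 90 degree rotation about z, so
# quats_from_vectors([1, 0, 0], [0, 1, 0]) returns a quaternion close to
# [0, 0, 0.7071, 0.7071] in scipy's (x, y, z, w) convention.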
# combines two quaternions
def combine_quats(q1, q2):
r1 = R.from_quat(q1).as_matrix()
r2 = R.from_quat(q2).as_matrix()
new_q = R.from_matrix(np.matmul(r1, r2)).as_quat()
return new_q
# converts a Euler rotation and translation into a 4x4 pose matrix
def euler2matrix(angles=[0, 0, 0], translation=[0, 0, 0], xyz="xyz", degrees=False):
r = R.from_euler(xyz, angles, degrees=degrees)
pose = np.eye(4)
pose[:3, 3] = translation
pose[:3, :3] = r.as_matrix()
return pose
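# Illustrative example for euler2matrix above (hedged):
# euler2matrix(angles=[0, 0, 90], translation=[1, 2, 3], degrees=True) returns a 4x4
# homogeneous pose whose upper-left 3x3 block is a 90 degree rotation about z and whose
# first three entries of the last column hold the translation [1, 2, 3].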
# adds redundant faces (re-ordered copies of each face)
def add_faces(faces):
f1 = np.array(faces[:, 0]).reshape(-1, 1)
f2 = np.array(faces[:, 1]).reshape(-1, 1)
f3 = np.array(faces[:, 2]).reshape(-1, 1)
faces_2 = np.concatenate((f1, f3, f2), axis=-1)
faces_3 = np.concatenate((f3, f2, f1), axis=-1)
faces = np.concatenate((faces, faces_2, faces_3), axis=0)
return faces
# centers a point cloud and scales it to a defined size
def scale_points(points, scale = 1.):
for i in range(3):
points[:,i] -= points[:,i].min()
points = points / points.max()
points = points / scale
for i in range(3):
verts_range = points[:, i].max()
points[:, i] -= verts_range / 2.
return points
# makes a urdf file pointing to a mesh
def make_urdf(verts, faces, urdf_location):
obj_location = urdf_location.replace('.urdf', '.obj')
faces = add_faces(faces)
save_obj(obj_location, torch.FloatTensor(verts), torch.LongTensor(faces), 4)
blank_location = os.path.join(os.path.dirname(objects.__file__), 'blank.urdf')
tree = ET.parse(blank_location)
root = tree.getroot()
root.attrib['name'] = 'object.urdf'
root[0][2][1][0].attrib['filename'] = obj_location
root[0][3][1][0].attrib['filename'] = obj_location
tree.write(urdf_location)
# loads an obj file and scales it
def get_obj_data(obj_location, scale = 1.):
obj_info = load_obj(obj_location)
verts = obj_info[0].data.numpy()
verts = scale_points(verts, scale)
faces = obj_info[1].verts_idx.data.numpy()
return verts, faces
# converts a mesh to a voxel array by subdividing the mesh
def mesh_to_voxel(verts, faces, resolution):
    # maximum (squared) side length allowed for the subdivided triangles
smallest_side = (1. / resolution) ** 2
    # center the mesh and scale it into the unit cube
verts_max = verts.max()
verts_min = verts.min()
verts = (verts - verts_min) / (verts_max - verts_min) - 0.5
# get all of the mesh triangles
faces = faces.clone()
v1 = torch.index_select(verts, 0, faces[:, 0])
v2 = torch.index_select(verts, 0, faces[:, 1])
v3 = torch.index_select(verts, 0, faces[:, 2])
    # define points as the set of all vertices
points = torch.cat((v1, v2, v3))
while True:
        # get the maximum (squared) side length of each triangle
side_1 = (torch.abs(v1 - v2) ** 2).sum(dim=1).unsqueeze(1)
side_2 = (torch.abs(v2 - v3) ** 2).sum(dim=1).unsqueeze(1)
side_3 = (torch.abs(v3 - v1) ** 2).sum(dim=1).unsqueeze(1)
sides = torch.cat((side_1, side_2, side_3), dim=1)
sides = sides.max(dim=1)[0]
        # identify triangles which are still too large
keep = sides > smallest_side
if keep.sum() == 0:
break
        # keep only the triangles which still need to be subdivided
v1 = v1[keep]
v2 = v2[keep]
v3 = v3[keep]
v4 = (v1 + v3) / 2.
v5 = (v1 + v2) / 2.
v6 = (v2 + v3) / 2.
del (side_1, side_2, side_3, keep, sides)
# add new vertices to set of points
points = torch.cat((points, v4, v5, v6))
        # add the subdivided triangles to the list of triangles
vertex_set = [v1, v2, v3, v4, v5, v6]
new_traingles = [[0, 3, 4], [4, 1, 5], [4, 3, 5], [3, 2, 5]]
new_verts = []
for i in range(4):
for j in range(3):
if i == 0:
new_verts.append(vertex_set[new_traingles[i][j]])
else:
new_verts[j] = torch.cat(
(new_verts[j], vertex_set[new_traingles[i][j]]))
v1, v2, v3 = new_verts
del (v4, v5, v6, vertex_set, new_verts)
del (v1, v2, v3)
if points is None:
return None
# scales points
points = ((points + .5) * (resolution - 1)).long()
points = torch.split(points.permute(1, 0), 1, dim=0)
points = [m.unsqueeze(0) for m in points]
# set grid points to on if a point exists inside them
voxel = torch.zeros((resolution, resolution, resolution)).cuda()
voxel[points] = 1
return voxel
# converts a voxel grid to a pointcloud
def voxel_to_pointcloud(voxel):
voxel = voxel.float()
off_positions = voxel == 0
conv_filter = torch.ones((1, 1, 3, 3, 3))
surface_voxel = torch.zeros(voxel.shape).cuda()
conv_filter = conv_filter.cuda()
local_occupancy = F.conv3d(voxel.unsqueeze(0).unsqueeze(0), conv_filter, padding=1)
local_occupancy = local_occupancy.squeeze(0).squeeze(0)
surface_positions = (local_occupancy < 27) * (local_occupancy > 0)
surface_voxel[surface_positions] = 1
surface_voxel[off_positions] = 0
points = torch.where(surface_voxel != 0)
points = torch.stack(points).permute(1, 0).float()
return points
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
def extract_ODMs(voxels):
voxels = voxels.data.cpu().numpy()
dim = voxels.shape[0]
a, b, c = np.where(voxels == 1)
large = int(dim * 1.5)
big_list = [[[[-1, large] for j in range(dim)] for i in range(dim)] for k in range(3)]
    # over the whole object, extract for each face the first and last occurrence of a voxel at each pixel
    # we take the highest for convenience
for i, j, k in zip(a, b, c):
big_list[0][i][j][0] = (max(k, big_list[0][i][j][0]))
big_list[0][i][j][1] = (min(k, big_list[0][i][j][1]))
big_list[1][i][k][0] = (max(j, big_list[1][i][k][0]))
big_list[1][i][k][1] = (min(j, big_list[1][i][k][1]))
big_list[2][j][k][0] = (max(i, big_list[2][j][k][0]))
big_list[2][j][k][1] = (min(i, big_list[2][j][k][1]))
ODMs = np.zeros((6, dim, dim)) # will hold odms
for i in range(dim):
for j in range(dim):
ODMs[0, i, j] = dim - 1 - big_list[0][i][j][0] if big_list[0][i][j][0] > -1 else dim
ODMs[1, i, j] = big_list[0][i][j][1] if big_list[0][i][j][1] < large else dim
ODMs[2, i, j] = dim - 1 - big_list[1][i][j][0] if big_list[1][i][j][0] > -1 else dim
ODMs[3, i, j] = big_list[1][i][j][1] if big_list[1][i][j][1] < large else dim
ODMs[4, i, j] = dim - 1 - big_list[2][i][j][0] if big_list[2][i][j][0] > -1 else dim
ODMs[5, i, j] = big_list[2][i][j][1] if big_list[2][i][j][1] < large else dim
return ODMs
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# use orthographic depth maps to do space carving
def apply_ODMs(ODMs, dim):
voxel = np.ones((dim, dim, dim))
a, b, c = np.where(ODMs > 0)
for x, i, j in zip(a, b, c):
pos = int(ODMs[x, i, j])
if x == 0:
voxel[i, j, -pos:] = 0
if x == 1:
voxel[i, j, :pos] = 0
if x == 2:
voxel[i, -pos:, j] = 0
if x == 3:
voxel[i, :pos, j] = 0
if x == 4:
voxel[-pos:, i, j] = 0
if x == 5:
voxel[:pos, i, j] = 0
voxel[ndimage.binary_fill_holes(voxel)] = 1
return torch.LongTensor(voxel).cuda()
# aligns a pointcloud to the size of a mesh
def realign_points(points, verts):
points = points.float()
verts = verts
for i in range(3):
points[:, i] = points[:, i] - ((points[:, i].max() + points[:, i].min()) / 2.)
v_range = verts[:, i].max() - verts[:, i].min()
p_range = points[:, i].max() + 1 - points[:, i].min()
points[:, i] = points[:, i] * v_range / p_range
return points
# saves arguments for an experiment
def save_config(location, args):
abs_path = os.path.abspath(location)
args = vars(args)
args['check_point'] = abs_path
config_location = f'{location}/config.json'
with open(config_location, 'w') as fp:
json.dump(args, fp, indent=4)
return config_location
# loads arguments from an experiment and the model weights
def load_model_config(location):
config_location = f'{location}/config.json'
with open(config_location) as json_file:
data = json.load(json_file)
weight_location = data['check_point'] + '/model'
args = namedtuple("ObjectName", data.keys())(*data.values())
return args, weight_location
# for nicely visualizing depth images
def visualize_depth(depth, max_depth=0.025):
depth[depth > max_depth] = 0
depth = 255 * (depth / max_depth)
depth = depth.astype(np.uint8)
return depth
# visualize the actions used by the policy
def visualize_actions(location, actions, args):
actions = actions.view(-1).long().data.cpu().numpy()
circle = get_circle(args.num_actions)
plt.hist(actions, bins=np.arange(0, args.num_actions+ 1 ))
plt.title("actions histogram")
plt.savefig(location + '/histogram.png')
plt.close()
array = np.zeros([args.num_actions * 2, args.num_actions * 4, 3])
for i in range(args.num_actions):
x, y, z = circle.points[i]
        x = math.atan2(-x, y)
        x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
        y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
for i in range(3):
for j in range(3):
array[x_co - 1 + i, y_co - 1 + j] += 1.
for a in actions:
x, y, z = circle.points[a]
        x = math.atan2(-x, y)
        x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
        y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
for i in range(3):
for j in range(3):
array[x_co - 1 + i, y_co - 1 + j] += 1.
array = array * 255. / array.max()
if args.use_img:
visible_location = os.path.join(
os.path.dirname(objects.__file__), "visible.obj"
)
seen_points = np.array(load_obj(visible_location)[0])
seen_points = seen_points / np.sqrt(((seen_points ** 2).sum(axis=1))).reshape(-1, 1)
for point in seen_points:
x, y, z = point
            x = math.atan2(-x, y)
            x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
            y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
for i in range(5):
for j in range(5):
if array[x_co - 2 + i, y_co - 2 + j].sum() == 0:
array[x_co - 2 + i, y_co - 2 + j] = (255, 127, 80)
array[np.all(array == (0, 0, 0), axis=-1)] = (0, 204, 204)
check_array = np.zeros([args.num_actions * 2, args.num_actions * 4])
for point in seen_points:
x, y, z = point
            x = math.atan2(-x, y)
            x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
            y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
for i in range(3):
for j in range(3):
check_array[x_co - 1 + i, y_co - 1 + j] = 100
on = 0.
off = 0.
for a in actions:
x, y, z = circle.points[a]
            x = math.atan2(-x, y)
            x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
            y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
if check_array[x_co, y_co] > 0:
on += 1
else:
off += 1
print(f'percentage in vision is {on * 100 / (on+off):.2f} % for policy')
else:
array[np.all(array == (0, 0, 0), axis=-1)] = (0, 204, 204)
array = array.astype(np.uint8)
Image.fromarray(array).save(location + '/sphere_projection.png')
# renders the predicted meshes alongside the ground truth meshes
def visualize_prediction(location, meshes, faces, names):
data = {}
meshes = meshes.data.cpu().numpy()
faces = faces.data.cpu().numpy()
locations = []
for n in names:
n = '/'+ n.split('/')[-1] + '/'
locations.append(location + n)
if not os.path.exists(locations[-1]):
os.makedirs(locations[-1])
data['locations'] = locations
pretty_render.render_representations(locations, names, meshes, faces)
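# A minimal CPU-only sanity check of a few of the pure numpy helpers above.
# This is a hedged, illustrative sketch and not part of the original pipeline.
if __name__ == "__main__":
    tri = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    # the normal of a triangle lying in the xy-plane points along z
    print("triangle normal:", normal_from_triangle(tri[0], tri[1], tri[2]))
    # add_faces appends two re-ordered copies of every face, tripling the face count
    print("faces after add_faces:", add_faces(np.array([[0, 1, 2]])).shape[0])
    # scale_points centers the cloud so that its longest axis spans roughly one unit
    scaled = scale_points(np.random.rand(100, 3).astype(np.float32), scale=1.0)
    print("per-axis extents:", scaled.max(0) - scaled.min(0))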
| Active-3D-Vision-and-Touch-main | pterotactyl/utility/utils.py |
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
import trimesh
from scipy.spatial.transform import Rotation as R
import pyrender
from PIL import Image
import torch
from tqdm.contrib import tzip
from pterotactyl.utility import utils
class CameraRenderer:
def __init__(self, cameraResolution=[512, 512]):
self.W = cameraResolution[0]
self.H = cameraResolution[1]
self._init_pyrender()
def _init_pyrender(self):
self.scene = self._init_scene()
self.objectNodes = []
self.handNodes = []
self._init_camera()
self.r = pyrender.OffscreenRenderer(self.W, self.H)
def _init_scene(self):
scene = pyrender.Scene(ambient_light=[0.3, 0.3, 0.3])
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, -0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, 0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[-1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
return scene
def _init_camera(self):
camera = pyrender.PerspectiveCamera(
yfov=60.0 / 180.0 * np.pi, znear=0.01, zfar=10.0, aspectRatio=1.0
)
cameraPose0 = utils.euler2matrix(
xyz="xyz", angles=[0, 0, 0], translation=[0, 0, 0], degrees=True
)
# Add camera node into scene
cameraNode = pyrender.Node(camera=camera, matrix=cameraPose0)
self.scene.add_node(cameraNode)
self.scene.main_camera_node = cameraNode
self.camera = cameraNode
initial_matrix = R.from_euler("xyz", [45.0, 0, 180.0], degrees=True).as_matrix()
self.update_camera_pose([0, 0.6, 0.6], initial_matrix)
def update_camera_pose(self, position, orientation):
"""
Update digit pose (including camera, lighting, and gel surface)
"""
pose = np.eye(4)
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose
def add_object(self, objTrimesh, position=[0, 0, 0], orientation=[0, 0, 0]):
mesh = pyrender.Mesh.from_trimesh(objTrimesh)
pose = utils.euler2matrix(angles=orientation, translation=position)
objNode = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(objNode)
self.objectNodes.append(objNode)
def add_points(self, points, radius, colour=[0, 0, 0]):
sm = trimesh.creation.uv_sphere(radius=radius)
sm.visual.vertex_colors = colour
tfs = np.tile(np.eye(4), (points.shape[0], 1, 1))
tfs[:, :3, 3] = points
m = pyrender.Mesh.from_trimesh(sm, poses=tfs)
objNode = pyrender.Node(mesh=m)
self.scene.add_node(objNode)
self.objectNodes.append(objNode)
def remove_objects(self):
for obj in self.objectNodes:
self.scene.remove_node(obj)
self.objectNodes = []
def render(self):
colour, depth = self.r.render(self.scene)
colour = np.clip((np.array(colour)), 0, 255).astype(np.uint8)
colour = Image.fromarray(colour)
return colour
# renders the predicted mesh along with the ground truth mesh
def render_representations(locations, names, meshes, faces):
recon_face = utils.add_faces(faces)
scene = CameraRenderer()
message = "rendering the predicted objects"
print("*" * len(message))
print(message)
print("*" * len(message))
for verts, name, location in tzip(meshes, names, locations):
###### render mesh #######
mesh = trimesh.Trimesh(verts, recon_face)
mesh.visual.vertex_colors = [228, 217, 111, 255]
scene.add_object(mesh)
img = scene.render()
img.save(f"{location}/mesh.png")
scene.remove_objects()
##### render point clouds #######
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(recon_face).cuda()
points = (
utils.batch_sample(verts.unsqueeze(0), faces, num=100000)[0]
.data.cpu()
.numpy()
)
scene.add_points(points, 0.01, [228, 217, 111])
img = scene.render()
img.save(f"{location}/points.png")
scene.remove_objects()
######## render real object #########
verts = np.load(name + "_verts.npy")
faces = np.load(name + "_faces.npy")
faces = utils.add_faces(faces)
mesh = trimesh.Trimesh(vertices=verts, faces=faces, process=False)
mesh.visual.vertex_colors = [228, 217, 111, 255]
scene.add_object(mesh)
img = scene.render()
img.save(f"{location}/ground_truth.png")
scene.remove_objects()
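# Hedged usage sketch of CameraRenderer on its own (it mirrors what render_representations
# does above; `verts` and `faces` are assumed to be numpy arrays for a single mesh):
#   scene = CameraRenderer()
#   scene.add_object(trimesh.Trimesh(vertices=verts, faces=faces, process=False))
#   image = scene.render()      # returns a PIL.Image
#   scene.remove_objects()      # reset the scene before rendering the next object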
| Active-3D-Vision-and-Touch-main | pterotactyl/utility/pretty_render.py |
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import numpy as np
from tqdm import tqdm
from glob import glob
import random
from pathlib import Path
import torch
import pterotactyl.object_data as object_data
import pterotactyl.objects as objects
from pterotactyl.utility import utils
from pterotactyl.simulator.scene import sampler
from pterotactyl.simulator.physics import grasping
def make_data_split():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "initial_objects/*"
)
split_destination = os.path.join(
os.path.dirname(objects.__file__), "data_split.npy"
)
object_files = glob(data_location)
object_files = [o.split("/")[-1].split(".")[0] for o in object_files]
object_files.sort()
random.Random(0).shuffle(object_files)
recon_train = object_files[:7700]
auto_train = object_files[7700 : 2 * 7700]
RL_train = object_files[2 * 7700 : 3 * 7700]
valid = object_files[3 * 7700 : 3 * 7700 + 2000]
test = object_files[3 * 7700 + 2000 : 3 * 7700 + 3000]
dict = {
"recon_train": recon_train,
"auto_train": auto_train,
"RL_train": RL_train,
"valid": valid,
"test": test,
}
np.save(split_destination, dict)
# produces a pointcloud from the surface of an object
def extract_points(verts, faces, dim=128, num_points=30000):
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(faces).cuda()
# converts the mesh to a voxel grid
voxel = utils.mesh_to_voxel(verts, faces, dim)
if voxel is None:
return None
# extracts orthographic depth maps from the voxel grid
ODMs = utils.extract_ODMs(voxel)
# reprojects the depth maps to a voxel grid to remove internal structure
voxel = utils.apply_ODMs(ODMs, dim)
# extracts a point cloud from the voxel grid
points = utils.voxel_to_pointcloud(voxel)
    # aligns the point cloud to the original mesh
points = utils.realign_points(points, verts.clone())
# make the point cloud of uniform size
while points.shape[0] < num_points:
points = torch.cat((points, points))
choices = np.random.choice(points.shape[0], num_points, replace=False)
points = points[choices]
return points
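# Hedged usage sketch for extract_points above (verts/faces are numpy arrays loaded
# elsewhere, as in save_point_info below); meshes whose voxelization fails return None and
# are simply skipped:
#   points = extract_points(verts, faces, dim=128, num_points=30000)
#   if points is not None:
#       surface = points.data.cpu().numpy()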
# extract the object information from mesh
def save_object_info():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "initial_objects/*"
)
data_destination = os.path.join(
os.path.dirname(object_data.__file__), "object_info/"
)
if not os.path.exists(data_destination):
os.makedirs(data_destination)
object_files = glob(data_location)
pbar = tqdm(object_files, smoothing=0.0)
pbar.set_description(f"Saving object information for quick loading")
for file in pbar:
file_destination = data_destination + file.split("/")[-1].split(".")[0]
# scale meshes and extract vertices and faces
verts, faces = utils.get_obj_data(file, scale=3.1)
np.save(file_destination + "_verts.npy", verts)
np.save(file_destination + "_faces.npy", faces)
# save the new object as a mesh and reference it in a urdf file for pybullet
utils.make_urdf(verts, faces, file_destination + ".urdf")
# extracts a point cloud from the object and saves it
def save_point_info():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "object_info/*.obj"
)
data_destination = os.path.join(os.path.dirname(object_data.__file__), "/")
if not os.path.exists(data_destination):
os.makedirs(data_destination)
object_files = glob(data_location)
pbar = tqdm(object_files, smoothing=0.0)
pbar.set_description(f"Extracting surface point cloud")
for file in pbar:
destination = data_destination + file.split("/")[-1].split(".")[0] + ".npy"
verts = np.load(file.replace(".obj", "_verts.npy"))
faces = np.load(file.replace(".obj", "_faces.npy"))
# extract the point cloud
points = extract_points(verts, faces)
if points is None:
continue
np.save(destination, points.data.cpu().numpy())
# simulates the grasps of an object for all possible actions
def save_simulation():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "object_info/*.obj"
)
grasp_destination_dir = os.path.join(
os.path.dirname(object_data.__file__), "grasp_info/"
)
image_destination_dir = os.path.join(
os.path.dirname(object_data.__file__), "images_colourful/"
)
if not os.path.exists(grasp_destination_dir):
os.makedirs(grasp_destination_dir)
if not os.path.exists(image_destination_dir):
os.makedirs(image_destination_dir)
object_files = glob(data_location)
simulation_infomation = {}
# defines the sampling function for simulation
s = sampler.Sampler(grasping.Agnostic_Grasp, bs=1, vision=True)
pbar = tqdm(object_files, smoothing=0.0)
pbar.set_description(f"Extracting grasp information")
set = [0, 0, 0, 0]
file_num = 0
for file in pbar:
file_number = file.split("/")[-1].split(".")[0]
grasp_destination = grasp_destination_dir + file_number + "/"
image_destination = image_destination_dir + file_number + ".npy"
batch = [file.replace(".obj", "")]
statuses = []
try:
s.load_objects(batch, from_dataset=True)
except:
continue
# save an image of the object
signals = s.sample(
[0],
touch=False,
touch_point_cloud=False,
vision=True,
vision_occluded=False,
)
img = signals["vision"][0]
np.save(image_destination, img)
for i in range(50):
# simulate the object
signals = s.sample(
[i],
touch=True,
touch_point_cloud=True,
vision=False,
vision_occluded=False,
)
status = signals["touch_status"][0]
good = 0
for k in range(4):
if status[k] == "touch":
good += 1
for k in range(good):
set[k] += 1
statuses.append(status)
# extracts the touch information for each of the 4 fingers
for j in range(4):
instance_grasp_destination = os.path.join(
grasp_destination, str(i), str(j)
)
Path(instance_grasp_destination).mkdir(parents=True, exist_ok=True)
if status[j] == "touch":
touch_signal = (
signals["touch_signal"][0][j].data.numpy().astype(np.uint8)
)
touch_points = signals["touch_point_cloud"][0][j]
np.save(instance_grasp_destination + "_touch.npy", touch_signal)
np.save(instance_grasp_destination + "_points.npy", touch_points)
if status[j] != "no_intersection":
ref_frame_pos = signals["finger_transfrom_pos"][0][j].data.numpy()
ref_frame_rot_M = signals["finger_transform_rot_M"][0][
j
].data.numpy()
ref_frame = {"pos": ref_frame_pos, "rot": ref_frame_rot_M}
np.save(instance_grasp_destination + "_ref_frame.npy", ref_frame)
s.remove_objects()
file_num += 0.5
simulation_infomation[file_number] = statuses
if __name__ == "__main__":
save_object_info()
save_point_info()
save_simulation()
make_data_split()
| Active-3D-Vision-and-Touch-main | pterotactyl/utility/data_making.py |
import nltk
import textstat
from nltk.tree import Tree
import benepar, spacy
import json
import numpy as np
nlp = spacy.load('en_core_web_md')
nlp.add_pipe(benepar.BeneparComponent("benepar_en3"))
import torch, transformers
tokenizer = transformers.AutoTokenizer.from_pretrained('../training/checkpoint-all-nli-100000')
model = transformers.AutoModelForSequenceClassification.from_pretrained('../training/checkpoint-all-nli-100000')
model = model.eval().cuda()
# helpers for computing syntactic complexity
def calc_words(t):
if type(t) == str:
return 1
else:
val = 0
for child in t:
val += calc_words(child)
return val
def calc_yngve(t, par):
if type(t) == str:
return par
else:
val = 0
for i, child in enumerate(reversed(t)):
val += calc_yngve(child, par+i)
return val
def compute_syntactic_complexity(sentence):
doc = nlp(sentence)
sent = list(doc.sents)[0]
line = sent._.parse_string
t = Tree.fromstring(line)
words = calc_words(t)
yngve = calc_yngve(t, 0)
return round(float(yngve)/words, 2)
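# Hand-worked illustration of compute_syntactic_complexity above (hedged; the parse below
# is a typical benepar-style tree, not an output of this script): for
#   (S (NP (PRP I)) (VP (VBD saw) (NP (PRP it))))
# the leaves accumulate right-branching depths 1 ("I"), 1 ("saw") and 0 ("it"), so the
# Yngve sum is 2 over 3 words and the function returns round(2 / 3, 2) = 0.67.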
def compute_stats(contexts, hypotheses, labels):
readability_scores = []
yngve_scores = []
sentence_lengths = []
model_fooled = []
for idx, (context, hypothesis, label) in enumerate(zip(contexts, hypotheses, labels)):
readability_scores.append(textstat.flesch_kincaid_grade(hypothesis))
yngve_scores.append(compute_syntactic_complexity(hypothesis))
sentence_lengths.append(len(nltk.word_tokenize(hypothesis)))
tokens = tokenizer.encode(context, hypothesis, return_tensors='pt').cuda()
model_pred = model(tokens).logits.detach()
        pred_label = torch.argmax(model_pred).item()
        if (pred_label == 2 and label == 'contradiction') or (pred_label == 1 and label == 'entailment'):
model_fooled.append(1)
else:
model_fooled.append(0)
return readability_scores, yngve_scores, sentence_lengths, model_fooled
datasets = ['../data/non-adversarial-with-rounds.jsonl', '../data/static-adversarial-with-rounds.jsonl', '../data/dynamic-adversarial-with-rounds.jsonl']
for dataset in datasets:
contexts = []
hypotheses = []
labels = []
for line in open(dataset,'rb'):
myjson = json.loads(line)
contexts.append(myjson['sentence1'])
hypotheses.append(myjson['sentence2'].strip())
labels.append(myjson['label'])
readability_scores, yngve_scores, sentence_lengths, model_fooled = compute_stats(contexts, hypotheses, labels)
print("Dataset", dataset)
print('Num Examples', len(hypotheses))
print('Readability Scores', np.mean(readability_scores))
print('Yngve Scores', np.mean(yngve_scores))
print('Sentence Lengths', np.mean(sentence_lengths))
print('Fooling Rate All-NLI Model', np.mean(model_fooled))
| dadc-limit-main | analysis/complexity.py |
import nltk
import json
from tqdm import tqdm
import numpy as np
def compute_stats(contexts, hypotheses, labels):
high_overlap_rates_entailment = []
high_overlap_rates_contradiction = []
for context, hypothesis, label in tqdm(zip(contexts, hypotheses, labels)):
context_tokens = nltk.word_tokenize(context)
hypothesis_tokens = [h for h in nltk.word_tokenize(hypothesis) if h != '.']
overlap_rate = sum([h in context_tokens for h in hypothesis_tokens]) / len(hypothesis_tokens)
if overlap_rate > 0.9:
if label == 'entailment':
high_overlap_rates_entailment.append(1)
elif label == 'contradiction':
high_overlap_rates_contradiction.append(1)
else:
exit('label not in set')
else:
if label == 'contradiction':
high_overlap_rates_contradiction.append(0)
if label == 'entailment':
high_overlap_rates_entailment.append(0)
return np.array(high_overlap_rates_entailment), np.array(high_overlap_rates_contradiction)
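# Worked example of the overlap heuristic above (hedged, illustrative sentences): for the
# context "A man is riding a horse ." and the hypothesis "A man is riding .", every
# hypothesis token except the final period appears in the context, so the overlap rate is
# 1.0 (> 0.9) and the example is counted as high-overlap for its label.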
datasets = ['../data/non-adversarial-with-rounds.jsonl', '../data/static-adversarial-with-rounds.jsonl', '../data/dynamic-adversarial-with-rounds.jsonl']
for dataset in datasets:
contexts = []
hypotheses = []
labels = []
for line in open(dataset,'rb'):
myjson = json.loads(line)
contexts.append(myjson['sentence1'])
hypotheses.append(myjson['sentence2'].strip())
labels.append(myjson['label'])
high_overlap_rates_entailment, high_overlap_rates_contradiction = compute_stats(contexts, hypotheses, labels)
print("Dataset", dataset)
print('High Overlap Entailment Count', sum(high_overlap_rates_entailment))
print('High Overlap Contradiction Count', sum(high_overlap_rates_contradiction))
print('High Overlap Entailment %', sum(high_overlap_rates_entailment) / (sum(high_overlap_rates_entailment) + sum(high_overlap_rates_contradiction)))
| dadc-limit-main | analysis/artifacts.py |
import nltk
import json
import numpy as np
from sacrebleu.metrics import BLEU
from tqdm import tqdm
from collections import defaultdict
import random
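# interbleu below is a diversity proxy: for a random sample of hypotheses (capped at
# roughly 1000) it finds, for each one, the highest BLEU score against any other
# hypothesis sharing its label, and averages these maxima; higher values indicate more
# repetitive data.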
def interbleu(hypotheses, labels):
bleu = BLEU()
avg_score = 0.0
total = 0.0
    # shuffle hypotheses and labels together so that their indices stay aligned
    paired = list(zip(hypotheses, labels))
    random.shuffle(paired)
    hypotheses, labels = [p[0] for p in paired], [p[1] for p in paired]
for hypothesis1_idx, hypothesis1 in tqdm(enumerate(hypotheses)):
if hypothesis1_idx > 1000:
continue
max_score = 0.0
best_sentence = None
curr_label = labels[hypothesis1_idx]
for hypothesis2_idx, hypothesis2 in enumerate(hypotheses):
if hypothesis1_idx == hypothesis2_idx:
continue
if labels[hypothesis2_idx] != curr_label:
continue
score = bleu.corpus_score([hypothesis1], [[hypothesis2]]).score
if score > max_score:
max_score = score
best_sentence = hypothesis2
avg_score += max_score
total += 1
return avg_score / total
def compute_stats(contexts, hypotheses, labels):
words = set()
bigrams = set()
for hypothesis, label in tqdm(zip(hypotheses, labels)):
for word in nltk.word_tokenize(hypothesis):
words.add(word.lower())
for bigram in list(nltk.bigrams(list(nltk.word_tokenize(hypothesis)))):
bigrams.add(bigram)
interbleu_scores = interbleu(hypotheses, labels)
contradiction_rate = np.mean(np.array([a == 'contradiction' for a in labels]))
context_counts = defaultdict(int)
for context in contexts:
context_counts[context] += 1
return words, bigrams, interbleu_scores, contradiction_rate, context_counts
datasets = ['../data/non-adversarial-with-rounds.jsonl', '../data/static-adversarial-with-rounds.jsonl', '../data/dynamic-adversarial-with-rounds.jsonl']
for dataset in datasets:
contexts = []
hypotheses = []
labels = []
for line in open(dataset,'rb'):
myjson = json.loads(line)
contexts.append(myjson['sentence1'])
hypotheses.append(myjson['sentence2'].strip())
labels.append(myjson['label'])
words, bigrams, interbleu_scores, contradiction_rate, context_counts = compute_stats(contexts, hypotheses, labels)
print("Dataset", dataset)
print('Num Unique Words', len(words))
print('Num Unique Bigrams', len(bigrams))
print('Inter-BLEU', interbleu_scores)
print('Contradiction %', contradiction_rate)
print('Num Examples Per Context', context_counts)
| dadc-limit-main | analysis/diversity.py |
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import argparse
import os
import numpy as np
import torch
from human_body_prior.body_model.body_model import BodyModel
from human_body_prior.tools.rotation_tools import aa2matrot, local2global_pose
from tqdm import tqdm
from utils import utils_transform
def main(args, bm):
for dataroot_subset in ["BioMotionLab_NTroje", "CMU", "MPI_HDM05"]:
print(dataroot_subset)
for phase in ["train", "test"]:
print(phase)
savedir = os.path.join(args.save_dir, dataroot_subset, phase)
if not os.path.exists(savedir):
os.makedirs(savedir)
split_file = os.path.join(
"prepare_data/data_split", dataroot_subset, phase + "_split.txt"
)
with open(split_file, "r") as f:
filepaths = [line.strip() for line in f]
rotation_local_full_gt_list = []
hmd_position_global_full_gt_list = []
body_parms_list = []
head_global_trans_list = []
idx = 0
for filepath in tqdm(filepaths):
data = {}
bdata = np.load(
os.path.join(args.root_dir, filepath), allow_pickle=True
)
if "mocap_framerate" in bdata:
framerate = bdata["mocap_framerate"]
else:
continue
idx += 1
if framerate == 120:
stride = 2
elif framerate == 60:
stride = 1
else:
raise AssertionError(
"Please check your AMASS data, should only have 2 types of framerate, either 120 or 60!!!"
)
bdata_poses = bdata["poses"][::stride, ...]
bdata_trans = bdata["trans"][::stride, ...]
subject_gender = bdata["gender"]
body_parms = {
"root_orient": torch.Tensor(
bdata_poses[:, :3]
), # .to(comp_device), # controls the global root orientation
"pose_body": torch.Tensor(
bdata_poses[:, 3:66]
), # .to(comp_device), # controls the body
"trans": torch.Tensor(
bdata_trans
), # .to(comp_device), # controls the global body position
}
body_parms_list = body_parms
body_pose_world = bm(
**{
k: v
for k, v in body_parms.items()
if k in ["pose_body", "root_orient", "trans"]
}
)
output_aa = torch.Tensor(bdata_poses[:, :66]).reshape(-1, 3)
output_6d = utils_transform.aa2sixd(output_aa).reshape(
bdata_poses.shape[0], -1
)
rotation_local_full_gt_list = output_6d[1:]
rotation_local_matrot = aa2matrot(
torch.tensor(bdata_poses).reshape(-1, 3)
).reshape(bdata_poses.shape[0], -1, 9)
rotation_global_matrot = local2global_pose(
rotation_local_matrot, bm.kintree_table[0].long()
) # rotation of joints relative to the origin
head_rotation_global_matrot = rotation_global_matrot[:, [15], :, :]
rotation_global_6d = utils_transform.matrot2sixd(
rotation_global_matrot.reshape(-1, 3, 3)
).reshape(rotation_global_matrot.shape[0], -1, 6)
input_rotation_global_6d = rotation_global_6d[1:, [15, 20, 21], :]
rotation_velocity_global_matrot = torch.matmul(
torch.inverse(rotation_global_matrot[:-1]),
rotation_global_matrot[1:],
)
rotation_velocity_global_6d = utils_transform.matrot2sixd(
rotation_velocity_global_matrot.reshape(-1, 3, 3)
).reshape(rotation_velocity_global_matrot.shape[0], -1, 6)
input_rotation_velocity_global_6d = rotation_velocity_global_6d[
:, [15, 20, 21], :
]
position_global_full_gt_world = body_pose_world.Jtr[
:, :22, :
] # position of joints relative to the world origin
position_head_world = position_global_full_gt_world[
:, 15, :
] # world position of head
head_global_trans = torch.eye(4).repeat(
position_head_world.shape[0], 1, 1
)
head_global_trans[:, :3, :3] = head_rotation_global_matrot.squeeze()
head_global_trans[:, :3, 3] = position_global_full_gt_world[:, 15, :]
head_global_trans_list = head_global_trans[1:]
num_frames = position_global_full_gt_world.shape[0] - 1
hmd_position_global_full_gt_list = torch.cat(
[
input_rotation_global_6d.reshape(num_frames, -1),
input_rotation_velocity_global_6d.reshape(num_frames, -1),
position_global_full_gt_world[1:, [15, 20, 21], :].reshape(
num_frames, -1
),
position_global_full_gt_world[1:, [15, 20, 21], :].reshape(
num_frames, -1
)
- position_global_full_gt_world[:-1, [15, 20, 21], :].reshape(
num_frames, -1
),
],
dim=-1,
)
data["rotation_local_full_gt_list"] = rotation_local_full_gt_list
data[
"hmd_position_global_full_gt_list"
] = hmd_position_global_full_gt_list
data["body_parms_list"] = body_parms_list
data["head_global_trans_list"] = head_global_trans_list
data["position_global_full_gt_world"] = (
position_global_full_gt_world[1:].cpu().float()
)
data["framerate"] = 60
data["gender"] = subject_gender
data["filepath"] = filepath
torch.save(data, os.path.join(savedir, "{}.pt".format(idx)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--support_dir",
type=str,
default=None,
help="=dir where you put your smplh and dmpls dirs",
)
parser.add_argument(
"--save_dir",
type=str,
default=None,
help="=dir where you want to save your generated data",
)
parser.add_argument(
"--root_dir", type=str, default=None, help="=dir where you put your AMASS data"
)
args = parser.parse_args()
# Here we follow the AvatarPoser paper and use male model for all sequences
bm_fname_male = os.path.join(args.support_dir, "smplh/{}/model.npz".format("male"))
dmpl_fname_male = os.path.join(
args.support_dir, "dmpls/{}/model.npz".format("male")
)
num_betas = 16 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
bm_male = BodyModel(
bm_fname=bm_fname_male,
num_betas=num_betas,
num_dmpls=num_dmpls,
dmpl_fname=dmpl_fname_male,
)
main(args, bm_male)
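# Example invocation (hedged; the paths below are placeholders for your local setup):
#   python prepare_data.py --support_dir <body_model_dir> --root_dir <amass_dir> --save_dir <output_dir>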
| AGRoL-main | prepare_data.py |
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import math
import os
import random
import numpy as np
import torch
from data_loaders.dataloader import load_data, TestDataset
from human_body_prior.body_model.body_model import BodyModel as BM
from model.networks import PureMLP
from tqdm import tqdm
from utils import utils_transform, utils_visualize
from utils.metrics import get_metric_function
from utils.model_util import create_model_and_diffusion, load_model_wo_clip
from utils.parser_util import sample_args
device = torch.device("cuda")
#####################
RADIANS_TO_DEGREES = 360.0 / (2 * math.pi)
METERS_TO_CENTIMETERS = 100.0
pred_metrics = [
"mpjre",
"mpjpe",
"mpjve",
"handpe",
"upperpe",
"lowerpe",
"rootpe",
"pred_jitter",
]
gt_metrics = [
"gt_jitter",
]
all_metrics = pred_metrics + gt_metrics
RADIANS_TO_DEGREES = 360.0 / (2 * math.pi)  # 57.2958 degrees per radian
metrics_coeffs = {
"mpjre": RADIANS_TO_DEGREES,
"mpjpe": METERS_TO_CENTIMETERS,
"mpjve": METERS_TO_CENTIMETERS,
"handpe": METERS_TO_CENTIMETERS,
"upperpe": METERS_TO_CENTIMETERS,
"lowerpe": METERS_TO_CENTIMETERS,
"rootpe": METERS_TO_CENTIMETERS,
"pred_jitter": 1.0,
"gt_jitter": 1.0,
"gt_mpjpe": METERS_TO_CENTIMETERS,
"gt_mpjve": METERS_TO_CENTIMETERS,
"gt_handpe": METERS_TO_CENTIMETERS,
"gt_rootpe": METERS_TO_CENTIMETERS,
"gt_upperpe": METERS_TO_CENTIMETERS,
"gt_lowerpe": METERS_TO_CENTIMETERS,
}
#####################
class BodyModel(torch.nn.Module):
def __init__(self, support_dir):
super().__init__()
device = torch.device("cuda")
subject_gender = "male"
bm_fname = os.path.join(
support_dir, "smplh/{}/model.npz".format(subject_gender)
)
dmpl_fname = os.path.join(
support_dir, "dmpls/{}/model.npz".format(subject_gender)
)
num_betas = 16 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
body_model = BM(
bm_fname=bm_fname,
num_betas=num_betas,
num_dmpls=num_dmpls,
dmpl_fname=dmpl_fname,
).to(device)
self.body_model = body_model.eval()
def forward(self, body_params):
with torch.no_grad():
body_pose = self.body_model(
**{
k: v
for k, v in body_params.items()
if k in ["pose_body", "trans", "root_orient"]
}
)
return body_pose
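# Runs inference over a full test sequence in non-overlapping windows of
# args.input_motion_length frames: sequences shorter than one window are left-padded by
# repeating the first sparse frame, and the final window is shifted back so that it ends
# at the last frame, with flag_index marking how many of its leading frames should be
# discarded when the per-window outputs are stitched back together.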
def non_overlapping_test(
args,
data,
sample_fn,
dataset,
model,
num_per_batch=256,
model_type="mlp",
):
gt_data, sparse_original, body_param, head_motion, filename = (
data[0],
data[1],
data[2],
data[3],
data[4],
)
gt_data = gt_data.cuda().float()
sparse_original = sparse_original.cuda().float()
head_motion = head_motion.cuda().float()
num_frames = head_motion.shape[0]
output_samples = []
count = 0
sparse_splits = []
flag_index = None
if args.input_motion_length <= num_frames:
while count < num_frames:
if count + args.input_motion_length > num_frames:
tmp_k = num_frames - args.input_motion_length
sub_sparse = sparse_original[
:, tmp_k : tmp_k + args.input_motion_length
]
flag_index = count - tmp_k
else:
sub_sparse = sparse_original[
:, count : count + args.input_motion_length
]
sparse_splits.append(sub_sparse)
count += args.input_motion_length
else:
flag_index = args.input_motion_length - num_frames
tmp_init = sparse_original[:, :1].repeat(1, flag_index, 1).clone()
sub_sparse = torch.concat([tmp_init, sparse_original], dim=1)
sparse_splits = [sub_sparse]
n_steps = len(sparse_splits) // num_per_batch
if len(sparse_splits) % num_per_batch > 0:
n_steps += 1
# Split the sequence into n_steps non-overlapping batches
if args.fix_noise:
# fix noise seed for every frame
noise = torch.randn(1, 1, 1).cuda()
noise = noise.repeat(1, args.input_motion_length, args.motion_nfeat)
else:
noise = None
for step_index in range(n_steps):
sparse_per_batch = torch.cat(
sparse_splits[
step_index * num_per_batch : (step_index + 1) * num_per_batch
],
dim=0,
)
new_batch_size = sparse_per_batch.shape[0]
if model_type == "diffusion":
sample = sample_fn(
model,
(new_batch_size, args.input_motion_length, args.motion_nfeat),
sparse=sparse_per_batch,
clip_denoised=False,
model_kwargs=None,
skip_timesteps=0,
init_image=None,
progress=False,
dump_steps=None,
noise=noise,
const_noise=False,
)
elif model_type == "mlp":
sample = model(sparse_per_batch)
if flag_index is not None and step_index == n_steps - 1:
last_batch = sample[-1]
last_batch = last_batch[flag_index:]
sample = sample[:-1].reshape(-1, args.motion_nfeat)
sample = torch.cat([sample, last_batch], dim=0)
else:
sample = sample.reshape(-1, args.motion_nfeat)
if not args.no_normalization:
output_samples.append(dataset.inv_transform(sample.cpu().float()))
else:
output_samples.append(sample.cpu().float())
return output_samples, body_param, head_motion, filename
def overlapping_test(
args,
data,
sample_fn,
dataset,
model,
sld_wind_size=70,
model_type="diffusion",
):
assert (
model_type == "diffusion"
), "currently only diffusion model supports overlapping test!!!"
gt_data, sparse_original, body_param, head_motion, filename = (
data[0],
data[1],
data[2],
data[3],
data[4],
)
gt_data = gt_data.cuda().float()
sparse_original = sparse_original.cuda().float()
head_motion = head_motion.cuda().float()
num_frames = head_motion.shape[0]
output_samples = []
count = 0
sparse_splits = []
flag_index = None
if num_frames < args.input_motion_length:
flag_index = args.input_motion_length - num_frames
tmp_init = sparse_original[:, :1].repeat(1, flag_index, 1).clone()
sub_sparse = torch.concat([tmp_init, sparse_original], dim=1)
sparse_splits = [sub_sparse]
else:
while count + args.input_motion_length <= num_frames:
if count == 0:
sub_sparse = sparse_original[
:, count : count + args.input_motion_length
]
tmp_idx = 0
else:
sub_sparse = sparse_original[
:, count : count + args.input_motion_length
]
tmp_idx = args.input_motion_length - sld_wind_size
sparse_splits.append([sub_sparse, tmp_idx])
count += sld_wind_size
if count < num_frames:
sub_sparse = sparse_original[:, -args.input_motion_length :]
tmp_idx = args.input_motion_length - (
num_frames - (count - sld_wind_size + args.input_motion_length)
)
sparse_splits.append([sub_sparse, tmp_idx])
memory = None # init memory
if args.fix_noise:
# fix noise seed for every frame
noise = torch.randn(1, 1, 1).cuda()
noise = noise.repeat(1, args.input_motion_length, args.motion_nfeat)
else:
noise = None
for step_index in range(len(sparse_splits)):
sparse_per_batch = sparse_splits[step_index][0]
memory_end_index = sparse_splits[step_index][1]
new_batch_size = sparse_per_batch.shape[0]
assert new_batch_size == 1
if memory is not None:
model_kwargs = {}
model_kwargs["y"] = {}
model_kwargs["y"]["inpainting_mask"] = torch.zeros(
(
new_batch_size,
args.input_motion_length,
args.motion_nfeat,
)
).cuda()
model_kwargs["y"]["inpainting_mask"][:, :memory_end_index, :] = 1
model_kwargs["y"]["inpainted_motion"] = torch.zeros(
(
new_batch_size,
args.input_motion_length,
args.motion_nfeat,
)
).cuda()
model_kwargs["y"]["inpainted_motion"][:, :memory_end_index, :] = memory[
:, -memory_end_index:, :
]
else:
model_kwargs = None
sample = sample_fn(
model,
(new_batch_size, args.input_motion_length, args.motion_nfeat),
sparse=sparse_per_batch,
clip_denoised=False,
model_kwargs=None,
skip_timesteps=0,
init_image=None,
progress=False,
dump_steps=None,
noise=noise,
const_noise=False,
)
memory = sample.clone().detach()
if flag_index is not None:
sample = sample[:, flag_index:].cpu().reshape(-1, args.motion_nfeat)
else:
sample = sample[:, memory_end_index:].reshape(-1, args.motion_nfeat)
if not args.no_normalization:
output_samples.append(dataset.inv_transform(sample.cpu().float()))
else:
output_samples.append(sample.cpu().float())
return output_samples, body_param, head_motion, filename
def evaluate_prediction(
args,
metrics,
sample,
body_model,
sample_index,
head_motion,
body_param,
fps,
filename,
):
motion_pred = sample.squeeze().cuda()
# Get the prediction from the model
model_rot_input = (
utils_transform.sixd2aa(motion_pred.reshape(-1, 6).detach())
.reshape(motion_pred.shape[0], -1)
.float()
)
T_head2world = head_motion.clone().cuda()
t_head2world = T_head2world[:, :3, 3].clone()
# Get the offset between the head and other joints using forward kinematic model
body_pose_local = body_model(
{
"pose_body": model_rot_input[..., 3:66],
"root_orient": model_rot_input[..., :3],
}
).Jtr
# Get the offset in global coordiante system between head and body_world.
t_head2root = -body_pose_local[:, 15, :]
t_root2world = t_head2root + t_head2world.cuda()
predicted_body = body_model(
{
"pose_body": model_rot_input[..., 3:66],
"root_orient": model_rot_input[..., :3],
"trans": t_root2world,
}
)
predicted_position = predicted_body.Jtr[:, :22, :]
# Get the predicted position and rotation
predicted_angle = model_rot_input
for k, v in body_param.items():
body_param[k] = v.squeeze().cuda()
body_param[k] = body_param[k][-predicted_angle.shape[0] :, ...]
# Get the ground truth position from the model
gt_body = body_model(body_param)
gt_position = gt_body.Jtr[:, :22, :]
# Create animation
if args.vis:
video_dir = args.output_dir
if not os.path.exists(video_dir):
os.makedirs(video_dir)
save_filename = filename.split(".")[0].replace("/", "-")
save_video_path = os.path.join(video_dir, save_filename + ".mp4")
utils_visualize.save_animation(
body_pose=predicted_body,
savepath=save_video_path,
bm=body_model.body_model,
fps=fps,
resolution=(800, 800),
)
save_video_path_gt = os.path.join(video_dir, save_filename + "_gt.mp4")
if not os.path.exists(save_video_path_gt):
utils_visualize.save_animation(
body_pose=gt_body,
savepath=save_video_path_gt,
bm=body_model.body_model,
fps=fps,
resolution=(800, 800),
)
gt_angle = body_param["pose_body"]
gt_root_angle = body_param["root_orient"]
predicted_root_angle = predicted_angle[:, :3]
predicted_angle = predicted_angle[:, 3:]
upper_index = [3, 6, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
lower_index = [0, 1, 2, 4, 5, 7, 8, 10, 11]
eval_log = {}
for metric in metrics:
eval_log[metric] = (
get_metric_function(metric)(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
)
.cpu()
.numpy()
)
torch.cuda.empty_cache()
return eval_log
def load_diffusion_model(args):
print("Creating model and diffusion...")
args.arch = args.arch[len("diffusion_") :]
model, diffusion = create_model_and_diffusion(args)
print(f"Loading checkpoints from [{args.model_path}]...")
state_dict = torch.load(args.model_path, map_location="cpu")
load_model_wo_clip(model, state_dict)
model.to("cuda:0") # dist_util.dev())
model.eval() # disable random masking
return model, diffusion
def load_mlp_model(args):
model = PureMLP(
args.latent_dim,
args.input_motion_length,
args.layers,
args.sparse_dim,
args.motion_nfeat,
)
model.eval()
state_dict = torch.load(args.model_path, map_location="cpu")
model.load_state_dict(state_dict)
model.to("cuda:0")
return model, None
def main():
args = sample_args()
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
fps = 60 # AMASS dataset requires 60 frames per second
body_model = BodyModel(args.support_dir)
print("Loading dataset...")
filename_list, all_info, mean, std = load_data(
args.dataset,
args.dataset_path,
"test",
)
dataset = TestDataset(
args.dataset,
mean,
std,
all_info,
filename_list,
)
log = {}
for metric in all_metrics:
log[metric] = 0
model_type = args.arch.split("_")[0]
if model_type == "diffusion":
model, diffusion = load_diffusion_model(args)
sample_fn = diffusion.p_sample_loop
elif model_type == "mlp":
model, _ = load_mlp_model(args)
sample_fn = None
else:
raise ValueError(f"Unknown model type {model_type}")
if not args.overlapping_test:
test_func = non_overlapping_test
# batch size in the case of non-overlapping testing
n_testframe = args.num_per_batch
else:
print("Overlapping testing...")
test_func = overlapping_test
# sliding window size in case of overlapping testing
n_testframe = args.sld_wind_size
for sample_index in tqdm(range(len(dataset))):
output, body_param, head_motion, filename = test_func(
args,
dataset[sample_index],
sample_fn,
dataset,
model,
n_testframe,
model_type=model_type,
)
sample = torch.cat(output, axis=0)
instance_log = evaluate_prediction(
args,
all_metrics,
sample,
body_model,
sample_index,
head_motion,
body_param,
fps,
filename,
)
for key in instance_log:
log[key] += instance_log[key]
# Print the value for all the metrics
print("Metrics for the predictions")
for metric in pred_metrics:
print(log[metric] / len(dataset) * metrics_coeffs[metric])
print("Metrics for the ground truth")
for metric in gt_metrics:
print(metric, log[metric] / len(dataset) * metrics_coeffs[metric])
if __name__ == "__main__":
main()
| AGRoL-main | test.py |
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import json
import os
import random
import numpy as np
import torch
from data_loaders.dataloader import get_dataloader, load_data, TrainDataset
from model.networks import PureMLP
from runner.train_mlp import train_step
from runner.training_loop import TrainLoop
from utils import dist_util
from utils.model_util import create_model_and_diffusion
from utils.parser_util import train_args
def train_diffusion_model(args, dataloader):
print("creating model and diffusion...")
args.arch = args.arch[len("diffusion_") :]
num_gpus = torch.cuda.device_count()
args.num_workers = args.num_workers * num_gpus
model, diffusion = create_model_and_diffusion(args)
if num_gpus > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
dist_util.setup_dist()
model = torch.nn.DataParallel(model).cuda()
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.module.parameters()) / 1000000.0)
)
else:
dist_util.setup_dist(args.device)
model.to(dist_util.dev())
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.parameters()) / 1000000.0)
)
print("Training...")
TrainLoop(args, model, diffusion, dataloader).run_loop()
print("Done.")
def train_mlp_model(args, dataloader):
print("creating MLP model...")
args.arch = args.arch[len("mlp_") :]
num_gpus = torch.cuda.device_count()
args.num_workers = args.num_workers * num_gpus
model = PureMLP(
args.latent_dim,
args.input_motion_length,
args.layers,
args.sparse_dim,
args.motion_nfeat,
)
model.train()
if num_gpus > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
dist_util.setup_dist()
model = torch.nn.DataParallel(model).cuda()
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.module.parameters()) / 1000000.0)
)
else:
dist_util.setup_dist(args.device)
model.to(dist_util.dev())
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.parameters()) / 1000000.0)
)
# initialize optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
nb_iter = 0
avg_loss = 0.0
avg_lr = 0.0
while (nb_iter + 1) < args.num_steps:
for (motion_target, motion_input) in dataloader:
loss, optimizer, current_lr = train_step(
motion_input,
motion_target,
model,
optimizer,
nb_iter,
args.num_steps,
args.lr,
args.lr / 10.0,
dist_util.dev(),
args.lr_anneal_steps,
)
avg_loss += loss
avg_lr += current_lr
if (nb_iter + 1) % args.log_interval == 0:
avg_loss = avg_loss / args.log_interval
avg_lr = avg_lr / args.log_interval
print("Iter {} Summary: ".format(nb_iter + 1))
print(f"\t lr: {avg_lr} \t Training loss: {avg_loss}")
avg_loss = 0
avg_lr = 0
if (nb_iter + 1) == args.num_steps:
break
nb_iter += 1
with open(
os.path.join(args.save_dir, "model-iter-" + str(nb_iter + 1) + ".pth"),
"wb",
) as f:
torch.save(model.state_dict(), f)
def main():
args = train_args()
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.save_dir is None:
raise FileNotFoundError("save_dir was not specified.")
elif os.path.exists(args.save_dir) and not args.overwrite:
raise FileExistsError("save_dir [{}] already exists.".format(args.save_dir))
elif not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
args_path = os.path.join(args.save_dir, "args.json")
with open(args_path, "w") as fw:
json.dump(vars(args), fw, indent=4, sort_keys=True)
print("creating data loader...")
motions, sparses, mean, std = load_data(
args.dataset,
args.dataset_path,
"train",
input_motion_length=args.input_motion_length,
)
dataset = TrainDataset(
args.dataset,
mean,
std,
motions,
sparses,
args.input_motion_length,
args.train_dataset_repeat_times,
args.no_normalization,
)
dataloader = get_dataloader(
dataset, "train", batch_size=args.batch_size, num_workers=args.num_workers
)
# args.lr_anneal_steps = (
# args.lr_anneal_steps // args.train_dataset_repeat_times
# ) * len(
# dataloader
# ) # the input lr_anneal_steps is by epoch, here convert it to the number of steps
model_type = args.arch.split("_")[0]
if model_type == "diffusion":
train_diffusion_model(args, dataloader)
elif model_type == "mlp":
train_mlp_model(args, dataloader)
if __name__ == "__main__":
main()
| AGRoL-main | train.py |
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import functools
import os
import torch
from diffusion import logger
from diffusion.fp16_util import MixedPrecisionTrainer
from diffusion.resample import create_named_schedule_sampler, LossAwareSampler
from torch.optim import AdamW
from tqdm import tqdm
from utils import dist_util
class TrainLoop:
def __init__(self, args, model, diffusion, data):
self.args = args
self.dataset = args.dataset
self.model = model
self.diffusion = diffusion
self.data = data
self.batch_size = args.batch_size
self.lr = args.lr
self.log_interval = args.log_interval
self.save_interval = args.save_interval
self.resume_checkpoint = args.resume_checkpoint
self.load_optimizer = args.load_optimizer
self.use_fp16 = False
self.fp16_scale_growth = 1e-3
self.weight_decay = args.weight_decay
self.lr_anneal_steps = args.lr_anneal_steps
self.step = 0
self.resume_step = 0
self.global_batch = self.batch_size
self.num_steps = args.num_steps
self.num_epochs = self.num_steps // len(self.data) + 1
self.sync_cuda = torch.cuda.is_available()
self._load_and_sync_parameters()
self.mp_trainer = MixedPrecisionTrainer(
model=self.model,
use_fp16=self.use_fp16,
fp16_scale_growth=self.fp16_scale_growth,
)
self.save_dir = args.save_dir
self.overwrite = args.overwrite
self.opt = AdamW(
self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
)
if self.resume_step and self.load_optimizer:
self._load_optimizer_state()
self.device = torch.device("cpu")
if torch.cuda.is_available() and dist_util.dev() != "cpu":
self.device = torch.device(dist_util.dev())
self.schedule_sampler_type = "uniform"
self.schedule_sampler = create_named_schedule_sampler(
self.schedule_sampler_type, diffusion
)
self.eval_wrapper, self.eval_data, self.eval_gt_data = None, None, None
self.use_ddp = False
self.ddp_model = self.model
def _load_and_sync_parameters(self):
resume_checkpoint = self.resume_checkpoint
if resume_checkpoint:
self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
self.model.load_state_dict(
dist_util.load_state_dict(
resume_checkpoint,
map_location=dist_util.dev(),
)
)
def _load_optimizer_state(self):
main_checkpoint = self.resume_checkpoint
opt_checkpoint = os.path.join(
os.path.dirname(main_checkpoint), f"opt{self.resume_step:09}.pt"
)
logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
        assert os.path.exists(opt_checkpoint), "optimizer state does not exist."
state_dict = dist_util.load_state_dict(
opt_checkpoint, map_location=dist_util.dev()
)
self.opt.load_state_dict(state_dict)
def run_loop(self):
for epoch in range(self.num_epochs):
print(f"Starting epoch {epoch}")
for motion, cond in tqdm(self.data):
motion = motion.to(self.device)
cond = cond.to(self.device)
self.run_step(motion, cond)
self.step += 1
if epoch % self.save_interval == 0:
self.save()
if epoch % self.log_interval == 0:
for k, v in logger.get_current().name2val.items():
if k == "loss":
print("epoch[{}]: loss[{:0.5f}]".format(epoch, v))
print("lr:", self.lr)
# Save the last checkpoint if it wasn't already saved.
if (self.step - 1) % self.save_interval != 0:
self.save()
def run_step(self, batch, cond):
self.forward_backward(batch, cond)
self.mp_trainer.optimize(self.opt)
self._step_lr()
self.log_step()
def forward_backward(self, batch, cond):
self.mp_trainer.zero_grad()
t, weights = self.schedule_sampler.sample(batch.shape[0], dist_util.dev())
compute_losses = functools.partial(
self.diffusion.training_losses,
self.ddp_model,
batch,
t,
cond,
dataset=self.data.dataset,
)
losses = compute_losses()
if isinstance(self.schedule_sampler, LossAwareSampler):
self.schedule_sampler.update_with_local_losses(t, losses["loss"].detach())
loss = (losses["loss"] * weights).mean()
log_loss_dict(self.diffusion, t, {k: v * weights for k, v in losses.items()})
self.mp_trainer.backward(loss)
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
    def _step_lr(self):
        # One-time learning rate decay: once training passes lr_anneal_steps, divide the
        # learning rate by 30 and disable further annealing.
        if not self.lr_anneal_steps:
            return
        if (self.step + self.resume_step) > self.lr_anneal_steps:
            self.lr = self.lr / 30.0
            self.lr_anneal_steps = False
        for param_group in self.opt.param_groups:
            param_group["lr"] = self.lr
def log_step(self):
logger.logkv("step", self.step + self.resume_step)
logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
def ckpt_file_name(self):
return f"model{(self.step+self.resume_step):09d}.pt"
def save(self):
def save_checkpoint(params):
state_dict = self.mp_trainer.master_params_to_state_dict(params)
logger.log("saving model...")
filename = self.ckpt_file_name()
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
with open(
os.path.join(self.save_dir, filename),
"wb",
) as f:
torch.save(state_dict, f)
save_checkpoint(self.mp_trainer.master_params)
with open(
os.path.join(self.save_dir, f"opt{(self.step+self.resume_step):09d}.pt"),
"wb",
) as f:
torch.save(self.opt.state_dict(), f)
def parse_resume_step_from_filename(filename):
"""
Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
checkpoint's number of steps.
"""
split = filename.split("model")
if len(split) < 2:
return 0
split1 = split[-1].split(".")[0]
try:
return int(split1)
except ValueError:
return 0
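# Illustrative usage (not part of the original file): parse_resume_step_from_filename
# recovers the step count from checkpoint names of the form produced by ckpt_file_name
# above; the paths below are hypothetical.
if __name__ == "__main__":
    assert parse_resume_step_from_filename("save/model000050000.pt") == 50000
    assert parse_resume_step_from_filename("save/last_checkpoint.pt") == 0  # no "model" prefix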
def log_loss_dict(diffusion, ts, losses):
for key, values in losses.items():
logger.logkv_mean(key, values.mean().item())
# Log the quantiles (four quartiles, in particular).
for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
quartile = int(4 * sub_t / diffusion.num_timesteps)
logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
| AGRoL-main | runner/training_loop.py |
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch
def update_lr_multistep(
nb_iter, total_iter, max_lr, min_lr, optimizer, lr_anneal_steps
):
if nb_iter > lr_anneal_steps:
current_lr = min_lr
else:
current_lr = max_lr
for param_group in optimizer.param_groups:
param_group["lr"] = current_lr
return optimizer, current_lr
def train_step(
motion_input,
motion_target,
model,
optimizer,
nb_iter,
total_iter,
max_lr,
min_lr,
device,
lr_anneal_steps,
):
motion_input = motion_input.to(device)
motion_target = motion_target.to(device)
motion_pred = model(motion_input)
loss = torch.mean(
torch.norm(
(motion_pred - motion_target).reshape(-1, 6),
2,
1,
)
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
optimizer, current_lr = update_lr_multistep(
nb_iter, total_iter, max_lr, min_lr, optimizer, lr_anneal_steps
)
return loss.item(), optimizer, current_lr
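# Hedged usage sketch (not part of the original file): train_step expects inputs whose last
# dimension is a multiple of 6 (per-joint 6D rotations), because the loss reshapes the
# residual to (-1, 6). The toy linear model and tensor shapes below are assumptions for
# illustration only; in the repository the model is PureMLP and the loop lives in train.py.
if __name__ == "__main__":
    toy_model = torch.nn.Linear(12, 12)  # stand-in for PureMLP, illustration only
    toy_optimizer = torch.optim.Adam(toy_model.parameters(), lr=1e-4)
    motion_input = torch.randn(4, 16, 12)  # (batch, frames, 2 joints x 6D)
    motion_target = torch.randn(4, 16, 12)
    loss, toy_optimizer, current_lr = train_step(
        motion_input,
        motion_target,
        toy_model,
        toy_optimizer,
        nb_iter=0,
        total_iter=100,
        max_lr=1e-4,
        min_lr=1e-5,
        device="cpu",
        lr_anneal_steps=50,
    )
    print(loss, current_lr)  # scalar loss and the lr chosen by update_lr_multistep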
| AGRoL-main | runner/train_mlp.py |
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
# Metric functions with same inputs
import numpy as np
import torch
def pred_jitter(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
pred_jitter = (
(
(
predicted_position[3:]
- 3 * predicted_position[2:-1]
+ 3 * predicted_position[1:-2]
- predicted_position[:-3]
)
* (fps**3)
)
.norm(dim=2)
.mean()
)
return pred_jitter
def gt_jitter(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
gt_jitter = (
(
(
gt_position[3:]
- 3 * gt_position[2:-1]
+ 3 * gt_position[1:-2]
- gt_position[:-3]
)
* (fps**3)
)
.norm(dim=2)
.mean()
)
return gt_jitter
def mpjre(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
diff = gt_angle - predicted_angle
diff[diff > np.pi] = diff[diff > np.pi] - 2 * np.pi
diff[diff < -np.pi] = diff[diff < -np.pi] + 2 * np.pi
rot_error = torch.mean(torch.absolute(diff))
return rot_error
def mpjpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
pos_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))
)
return pos_error
def handpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
pos_error_hands = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., [20, 21]
]
)
return pos_error_hands
def upperpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
upper_body_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., upper_index
]
)
return upper_body_error
def lowerpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
lower_body_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., lower_index
]
)
return lower_body_error
def rootpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
pos_error_root = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., [0]
]
)
return pos_error_root
def mpjve(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
gt_velocity = (gt_position[1:, ...] - gt_position[:-1, ...]) * fps
predicted_velocity = (
predicted_position[1:, ...] - predicted_position[:-1, ...]
) * fps
vel_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_velocity - predicted_velocity), axis=-1))
)
return vel_error
metric_funcs_dict = {
"mpjre": mpjre,
"mpjpe": mpjpe,
"mpjve": mpjve,
"handpe": handpe,
"upperpe": upperpe,
"lowerpe": lowerpe,
"rootpe": rootpe,
"pred_jitter": pred_jitter,
"gt_jitter": gt_jitter,
}
def get_metric_function(metric):
return metric_funcs_dict[metric]
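# Hedged usage sketch (not part of the original file): every metric above shares the same
# signature, so callers can look them up by name through get_metric_function. The shapes
# below are assumptions for illustration: positions are (frames, joints, 3) tensors, and
# the angle/root tensors are only consumed by the rotation metrics.
if __name__ == "__main__":
    frames, joints, fps = 8, 22, 60
    gt_pos = torch.randn(frames, joints, 3)
    pred_pos = gt_pos + 0.01 * torch.randn(frames, joints, 3)
    gt_ang = torch.zeros(frames, (joints - 1) * 3)
    pred_ang = torch.zeros(frames, (joints - 1) * 3)
    gt_root = torch.zeros(frames, 3)
    pred_root = torch.zeros(frames, 3)
    upper = [3, 6, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
    lower = [0, 1, 2, 4, 5, 7, 8, 10, 11]
    for name in ("mpjpe", "mpjve", "pred_jitter"):
        value = get_metric_function(name)(
            pred_pos, pred_ang, pred_root, gt_pos, gt_ang, gt_root, upper, lower, fps
        )
        print(name, float(value))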
| AGRoL-main | utils/metrics.py |
import os
SMPL_DATA_PATH = "./body_models/smpl"
SMPL_KINTREE_PATH = os.path.join(SMPL_DATA_PATH, "kintree_table.pkl")
SMPL_MODEL_PATH = os.path.join(SMPL_DATA_PATH, "SMPL_NEUTRAL.pkl")
JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(SMPL_DATA_PATH, "J_regressor_extra.npy")
ROT_CONVENTION_TO_ROT_NUMBER = {
"legacy": 23,
"no_hands": 21,
"full_hands": 51,
"mitten_hands": 33,
}
GENDERS = ["neutral", "male", "female"]
NUM_BETAS = 10
| AGRoL-main | utils/config.py |
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
from diffusion import gaussian_diffusion as gd
from diffusion.respace import space_timesteps, SpacedDiffusion
from model.meta_model import MetaModel
def load_model_wo_clip(model, state_dict):
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
if len(unexpected_keys) != 0:
state_dict_new = {}
for key in state_dict.keys():
state_dict_new[key.replace("module.", "")] = state_dict[key]
missing_keys, unexpected_keys = model.load_state_dict(
state_dict_new, strict=False
)
assert len(unexpected_keys) == 0
assert all([k.startswith("clip_model.") for k in missing_keys])
def create_model_and_diffusion(args):
model = MetaModel(**get_model_args(args))
diffusion = create_gaussian_diffusion(args)
return model, diffusion
def get_model_args(args):
return {
"arch": args.arch,
"nfeats": args.motion_nfeat,
"latent_dim": args.latent_dim,
"sparse_dim": args.sparse_dim,
"num_layers": args.layers,
"dropout": 0.1,
"cond_mask_prob": args.cond_mask_prob,
"dataset": args.dataset,
"input_motion_length": args.input_motion_length,
}
def create_gaussian_diffusion(args):
predict_xstart = True
steps = args.diffusion_steps # 1000
scale_beta = 1.0
timestep_respacing = args.timestep_respacing
learn_sigma = False
rescale_timesteps = False
betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
return SpacedDiffusion(
dataset=args.dataset,
use_timesteps=space_timesteps(steps, timestep_respacing),
betas=betas,
model_mean_type=(
gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
),
model_var_type=(
(
gd.ModelVarType.FIXED_LARGE
if not args.sigma_small
else gd.ModelVarType.FIXED_SMALL
)
if not learn_sigma
else gd.ModelVarType.LEARNED_RANGE
),
loss_type=loss_type,
rescale_timesteps=rescale_timesteps,
)
| AGRoL-main | utils/model_util.py |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Check PYTORCH3D_LICENCE before use
import functools
from typing import Optional
import torch
import torch.nn.functional as F
"""
The transformation matrices returned from the functions in this file assume
the points on which the transformation will be applied are column vectors.
i.e. the R matrix is structured as
R = [
[Rxx, Rxy, Rxz],
[Ryx, Ryy, Ryz],
[Rzx, Rzy, Rzz],
] # (3, 3)
This matrix can be applied to column vectors by post multiplication
by the points e.g.
points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point
transformed_points = R * points
To apply the same matrix to points which are row vectors, the R matrix
can be transposed and pre multiplied by the points:
e.g.
points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point
transformed_points = points * R.transpose(1, 0)
"""
def quaternion_to_matrix(quaternions):
"""
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
def _copysign(a, b):
"""
    Return a tensor where each element has the absolute value taken from the
corresponding element of a, with sign taken from the corresponding
element of b. This is like the standard copysign floating-point operation,
but is not careful about negative 0 and NaN.
Args:
a: source tensor.
b: tensor whose signs will be used, of the same shape as a.
Returns:
Tensor of the same shape as a with the signs of b.
"""
signs_differ = (a < 0) != (b < 0)
return torch.where(signs_differ, -a, a)
def _sqrt_positive_part(x):
"""
Returns torch.sqrt(torch.max(0, x))
but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix):
"""
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.")
m00 = matrix[..., 0, 0]
m11 = matrix[..., 1, 1]
m22 = matrix[..., 2, 2]
o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
return torch.stack((o0, o1, o2, o3), -1)
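# Illustrative sanity check (not part of the original file): the identity quaternion maps
# to the identity matrix, and matrix_to_quaternion inverts quaternion_to_matrix for unit
# quaternions with a positive real part.
if __name__ == "__main__":
    assert torch.allclose(
        quaternion_to_matrix(torch.tensor([1.0, 0.0, 0.0, 0.0])), torch.eye(3)
    )
    q = torch.randn(5, 4)
    q[:, 0] = q[:, 0].abs() + 0.1  # keep the real part positive so the round trip is exact
    q = F.normalize(q, p=2, dim=-1)
    assert torch.allclose(matrix_to_quaternion(quaternion_to_matrix(q)), q, atol=1e-5)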
def _axis_angle_rotation(axis: str, angle):
"""
    Return the rotation matrices for rotations about one of the axes
    used by Euler angles, for each value of the angle given.
Args:
axis: Axis label "X" or "Y or "Z".
angle: any shape tensor of Euler angles in radians
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
if axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
if axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
def euler_angles_to_matrix(euler_angles, convention: str):
"""
Convert rotations given as Euler angles in radians to rotation matrices.
Args:
euler_angles: Euler angles in radians as tensor of shape (..., 3).
convention: Convention string of three uppercase letters from
{"X", "Y", and "Z"}.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
raise ValueError("Invalid input euler angles.")
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
return functools.reduce(torch.matmul, matrices)
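# Illustrative sanity check (not part of the original file): with the "XYZ" convention,
# a rotation of pi/2 about Z maps the x-axis, treated as a column vector per the module
# docstring, onto the y-axis.
if __name__ == "__main__":
    import math  # local import used by this example only
    R = euler_angles_to_matrix(torch.tensor([0.0, 0.0, math.pi / 2]), "XYZ")
    assert torch.allclose(
        R @ torch.tensor([1.0, 0.0, 0.0]), torch.tensor([0.0, 1.0, 0.0]), atol=1e-6
    )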
def _angle_from_tan(
axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
):
"""
Extract the first or third Euler angle from the two members of
the matrix which are positive constant times its sine and cosine.
Args:
axis: Axis label "X" or "Y or "Z" for the angle we are finding.
other_axis: Axis label "X" or "Y or "Z" for the middle axis in the
convention.
data: Rotation matrices as tensor of shape (..., 3, 3).
horizontal: Whether we are looking for the angle for the third axis,
which means the relevant entries are in the same row of the
rotation matrix. If not, they are in the same column.
tait_bryan: Whether the first and third axes in the convention differ.
Returns:
        Euler angles in radians for each matrix in data, as a tensor
of shape (...).
"""
i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
if horizontal:
i2, i1 = i1, i2
even = (axis + other_axis) in ["XY", "YZ", "ZX"]
if horizontal == even:
return torch.atan2(data[..., i1], data[..., i2])
if tait_bryan:
return torch.atan2(-data[..., i2], data[..., i1])
return torch.atan2(data[..., i2], -data[..., i1])
def _index_from_letter(letter: str):
if letter == "X":
return 0
if letter == "Y":
return 1
if letter == "Z":
return 2
def matrix_to_euler_angles(matrix, convention: str):
"""
Convert rotations given as rotation matrices to Euler angles in radians.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
convention: Convention string of three uppercase letters.
Returns:
Euler angles in radians as tensor of shape (..., 3).
"""
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.")
i0 = _index_from_letter(convention[0])
i2 = _index_from_letter(convention[2])
tait_bryan = i0 != i2
if tait_bryan:
central_angle = torch.asin(
matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
)
else:
central_angle = torch.acos(matrix[..., i0, i0])
o = (
_angle_from_tan(
convention[0], convention[1], matrix[..., i2], False, tait_bryan
),
central_angle,
_angle_from_tan(
convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
),
)
return torch.stack(o, -1)
def random_quaternions(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random quaternions representing rotations,
i.e. versors with nonnegative real part.
Args:
n: Number of quaternions in a batch to return.
dtype: Type to return.
device: Desired device of returned tensor. Default:
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Quaternions as tensor of shape (N, 4).
"""
o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
s = (o * o).sum(1)
o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
return o
def random_rotations(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random rotations as 3x3 rotation matrices.
Args:
n: Number of rotation matrices in a batch to return.
dtype: Type to return.
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Rotation matrices as tensor of shape (n, 3, 3).
"""
quaternions = random_quaternions(
n, dtype=dtype, device=device, requires_grad=requires_grad
)
return quaternion_to_matrix(quaternions)
def random_rotation(
dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate a single random 3x3 rotation matrix.
Args:
dtype: Type to return
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type
requires_grad: Whether the resulting tensor should have the gradient
flag set
Returns:
Rotation matrix as tensor of shape (3, 3).
"""
return random_rotations(1, dtype, device, requires_grad)[0]
def standardize_quaternion(quaternions):
"""
Convert a unit quaternion to a standard form: one in which the real
part is non negative.
Args:
quaternions: Quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Standardized quaternions as tensor of shape (..., 4).
"""
return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions)
def quaternion_raw_multiply(a, b):
"""
Multiply two quaternions.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions shape (..., 4).
"""
aw, ax, ay, az = torch.unbind(a, -1)
bw, bx, by, bz = torch.unbind(b, -1)
ow = aw * bw - ax * bx - ay * by - az * bz
ox = aw * bx + ax * bw + ay * bz - az * by
oy = aw * by - ax * bz + ay * bw + az * bx
oz = aw * bz + ax * by - ay * bx + az * bw
return torch.stack((ow, ox, oy, oz), -1)
def quaternion_multiply(a, b):
"""
Multiply two quaternions representing rotations, returning the quaternion
representing their composition, i.e. the versor with nonnegative real part.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions of shape (..., 4).
"""
ab = quaternion_raw_multiply(a, b)
return standardize_quaternion(ab)
def quaternion_invert(quaternion):
"""
Given a quaternion representing rotation, get the quaternion representing
its inverse.
Args:
quaternion: Quaternions as tensor of shape (..., 4), with real part
first, which must be versors (unit quaternions).
Returns:
The inverse, a tensor of quaternions of shape (..., 4).
"""
return quaternion * quaternion.new_tensor([1, -1, -1, -1])
def quaternion_apply(quaternion, point):
"""
Apply the rotation given by a quaternion to a 3D point.
Usual torch rules for broadcasting apply.
Args:
quaternion: Tensor of quaternions, real part first, of shape (..., 4).
point: Tensor of 3D points of shape (..., 3).
Returns:
Tensor of rotated points of shape (..., 3).
"""
if point.size(-1) != 3:
raise ValueError(f"Points are not in 3D, f{point.shape}.")
real_parts = point.new_zeros(point.shape[:-1] + (1,))
point_as_quaternion = torch.cat((real_parts, point), -1)
out = quaternion_raw_multiply(
quaternion_raw_multiply(quaternion, point_as_quaternion),
quaternion_invert(quaternion),
)
return out[..., 1:]
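# Illustrative sanity check (not part of the original file): the quaternion (0, 0, 0, 1)
# is a half-turn about the z-axis, so it sends the point (1, 0, 0) to (-1, 0, 0).
if __name__ == "__main__":
    half_turn_z = torch.tensor([0.0, 0.0, 0.0, 1.0])
    rotated = quaternion_apply(half_turn_z, torch.tensor([1.0, 0.0, 0.0]))
    assert torch.allclose(rotated, torch.tensor([-1.0, 0.0, 0.0]), atol=1e-6)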
def axis_angle_to_matrix(axis_angle):
"""
Convert rotations given as axis/angle to rotation matrices.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle))
def matrix_to_axis_angle(matrix):
"""
Convert rotations given as rotation matrices to axis/angle.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
return quaternion_to_axis_angle(matrix_to_quaternion(matrix))
def axis_angle_to_quaternion(axis_angle):
"""
Convert rotations given as axis/angle to quaternions.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
half_angles = 0.5 * angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
quaternions = torch.cat(
[torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1
)
return quaternions
def quaternion_to_axis_angle(quaternions):
"""
Convert rotations given as quaternions to axis/angle.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
half_angles = torch.atan2(norms, quaternions[..., :1])
angles = 2 * half_angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
return quaternions[..., 1:] / sin_half_angles_over_angles
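# Illustrative sanity check (not part of the original file): for rotation angles strictly
# between 0 and pi, axis_angle_to_quaternion and quaternion_to_axis_angle invert each
# other up to floating-point error.
if __name__ == "__main__":
    axis = F.normalize(torch.randn(4, 3), p=2, dim=-1)
    angle = 0.1 + torch.rand(4, 1) * 2.5  # angles in (0.1, 2.6) rad, away from 0 and pi
    aa = axis * angle
    aa_back = quaternion_to_axis_angle(axis_angle_to_quaternion(aa))
    assert torch.allclose(aa, aa_back, atol=1e-5)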
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
"""
Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
using Gram--Schmidt orthogonalisation per Section B of [1].
Args:
d6: 6D rotation representation, of size (*, 6)
Returns:
batch of rotation matrices of size (*, 3, 3)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
a1, a2 = d6[..., :3], d6[..., 3:]
b1 = F.normalize(a1, dim=-1)
b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
b2 = F.normalize(b2, dim=-1)
b3 = torch.cross(b1, b2, dim=-1)
return torch.stack((b1, b2, b3), dim=-2)
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
"""
Converts rotation matrices to 6D rotation representation by Zhou et al. [1]
by dropping the last row. Note that 6D representation is not unique.
Args:
matrix: batch of rotation matrices of size (*, 3, 3)
Returns:
6D rotation representation, of size (*, 6)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6)
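# Illustrative sanity check (not part of the original file): since matrix_to_rotation_6d
# keeps the first two rows of an orthonormal matrix, rotation_6d_to_matrix recovers the
# original rotation exactly (up to floating-point error) via Gram-Schmidt.
if __name__ == "__main__":
    R = random_rotations(8)
    R_back = rotation_6d_to_matrix(matrix_to_rotation_6d(R))
    assert torch.allclose(R, R_back, atol=1e-5)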
| AGRoL-main | utils/rotation_conversions.py |
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import argparse
import json
import os
from argparse import ArgumentParser
def parse_and_load_from_model(parser):
# args according to the loaded model
# do not try to specify them from cmd line since they will be overwritten
add_data_options(parser)
add_model_options(parser)
add_diffusion_options(parser)
args = parser.parse_args()
args_to_overwrite = []
for group_name in ["dataset", "model", "diffusion"]:
args_to_overwrite += get_args_per_group_name(parser, args, group_name)
# load args from model
model_path = get_model_path_from_args()
args_path = os.path.join(os.path.dirname(model_path), "args.json")
assert os.path.exists(args_path), "Arguments json file was not found!"
with open(args_path, "r") as fr:
model_args = json.load(fr)
for a in args_to_overwrite:
if a in model_args.keys():
# Use the chosen dataset, or use the dataset that is used to train the model
if a == "dataset":
if args.__dict__[a] is None:
args.__dict__[a] = model_args[a]
elif a == "input_motion_length":
continue
else:
args.__dict__[a] = model_args[a]
else:
print(
"Warning: was not able to load [{}], using default value [{}] instead.".format(
a, args.__dict__[a]
)
)
return args
def get_args_per_group_name(parser, args, group_name):
for group in parser._action_groups:
if group.title == group_name:
group_dict = {
a.dest: getattr(args, a.dest, None) for a in group._group_actions
}
return list(argparse.Namespace(**group_dict).__dict__.keys())
return ValueError("group_name was not found.")
def get_model_path_from_args():
try:
dummy_parser = ArgumentParser()
dummy_parser.add_argument("model_path")
dummy_args, _ = dummy_parser.parse_known_args()
return dummy_args.model_path
except Exception:
raise ValueError("model_path argument must be specified.")
def add_base_options(parser):
group = parser.add_argument_group("base")
group.add_argument(
"--cuda", default=True, type=bool, help="Use cuda device, otherwise use CPU."
)
group.add_argument("--device", default=0, type=int, help="Device id to use.")
group.add_argument("--seed", default=10, type=int, help="For fixing random seed.")
group.add_argument(
"--batch_size", default=64, type=int, help="Batch size during training."
)
group.add_argument(
"--timestep_respacing", default="", type=str, help="ddim timestep respacing."
)
def add_diffusion_options(parser):
group = parser.add_argument_group("diffusion")
group.add_argument(
"--noise_schedule",
default="cosine",
choices=["linear", "cosine"],
type=str,
help="Noise schedule type",
)
group.add_argument(
"--diffusion_steps",
default=1000,
type=int,
help="Number of diffusion steps (denoted T in the paper)",
)
group.add_argument(
"--sigma_small", default=True, type=bool, help="Use smaller sigma values."
)
def add_model_options(parser):
group = parser.add_argument_group("model")
group.add_argument(
"--arch",
default="DiffMLP",
type=str,
help="Architecture types as reported in the paper.",
)
group.add_argument(
"--motion_nfeat", default=132, type=int, help="motion feature dimension"
)
group.add_argument(
"--sparse_dim", default=54, type=int, help="sparse signal feature dimension"
)
group.add_argument("--layers", default=8, type=int, help="Number of layers.")
group.add_argument(
"--latent_dim", default=512, type=int, help="Transformer/GRU width."
)
group.add_argument(
"--cond_mask_prob",
default=0.0,
type=float,
help="The probability of masking the condition during training."
" For classifier-free guidance learning.",
)
group.add_argument(
"--input_motion_length",
default=196,
type=int,
help="Limit for the maximal number of frames.",
)
group.add_argument(
"--no_normalization",
action="store_true",
help="no data normalisation for the 6d motions",
)
def add_data_options(parser):
group = parser.add_argument_group("dataset")
group.add_argument(
"--dataset",
default=None,
choices=[
"amass",
],
type=str,
help="Dataset name (choose from list).",
)
group.add_argument(
"--dataset_path",
default="./dataset/AMASS/",
type=str,
help="Dataset path",
)
def add_training_options(parser):
group = parser.add_argument_group("training")
group.add_argument(
"--save_dir",
required=True,
type=str,
help="Path to save checkpoints and results.",
)
group.add_argument(
"--overwrite",
action="store_true",
help="If True, will enable to use an already existing save_dir.",
)
group.add_argument(
"--train_platform_type",
default="NoPlatform",
choices=["NoPlatform", "ClearmlPlatform", "TensorboardPlatform"],
type=str,
help="Choose platform to log results. NoPlatform means no logging.",
)
group.add_argument("--lr", default=2e-4, type=float, help="Learning rate.")
group.add_argument(
"--weight_decay", default=0.0, type=float, help="Optimizer weight decay."
)
group.add_argument(
"--lr_anneal_steps",
default=0,
type=int,
help="Number of learning rate anneal steps.",
)
group.add_argument(
"--train_dataset_repeat_times",
default=1000,
type=int,
help="Repeat the training dataset to save training time",
)
group.add_argument(
"--eval_during_training",
action="store_true",
help="If True, will run evaluation during training.",
)
group.add_argument(
"--log_interval", default=100, type=int, help="Log losses each N steps"
)
group.add_argument(
"--save_interval",
default=5000,
type=int,
help="Save checkpoints and run evaluation each N steps",
)
group.add_argument(
"--num_steps",
default=6000000,
type=int,
help="Training will stop after the specified number of steps.",
)
group.add_argument(
"--resume_checkpoint",
default="",
type=str,
help="If not empty, will start from the specified checkpoint (path to model###.pt file).",
)
group.add_argument(
"--load_optimizer",
action="store_true",
help="If True, will also load the saved optimizer state for network initialization",
)
group.add_argument(
"--num_workers",
default=8,
type=int,
help="Number of dataloader workers.",
)
def add_sampling_options(parser):
group = parser.add_argument_group("sampling")
group.add_argument(
"--overlapping_test",
action="store_true",
help="enabling overlapping test",
)
group.add_argument(
"--num_per_batch",
default=256,
type=int,
help="the batch size of each split during non-overlapping testing",
)
group.add_argument(
"--sld_wind_size",
default=70,
type=int,
help="the sliding window size",
)
group.add_argument(
"--vis",
action="store_true",
help="visualize the output",
)
group.add_argument(
"--fix_noise",
action="store_true",
help="fix init noise for the output",
)
group.add_argument(
"--fps",
default=30,
type=int,
help="FPS",
)
group.add_argument(
"--model_path",
required=True,
type=str,
help="Path to model####.pt file to be sampled.",
)
group.add_argument(
"--output_dir",
default="",
type=str,
help="Path to results dir (auto created by the script). "
"If empty, will create dir in parallel to checkpoint.",
)
group.add_argument(
"--support_dir",
type=str,
help="the dir that you store your smplh and dmpls dirs",
)
def add_evaluation_options(parser):
group = parser.add_argument_group("eval")
group.add_argument(
"--model_path",
required=True,
type=str,
help="Path to model####.pt file to be sampled.",
)
def train_args():
parser = ArgumentParser()
add_base_options(parser)
add_data_options(parser)
add_model_options(parser)
add_diffusion_options(parser)
add_training_options(parser)
return parser.parse_args()
def sample_args():
parser = ArgumentParser()
# args specified by the user: (all other will be loaded from the model)
add_base_options(parser)
add_sampling_options(parser)
return parse_and_load_from_model(parser)
def evaluation_parser():
parser = ArgumentParser()
# args specified by the user: (all other will be loaded from the model)
add_base_options(parser)
add_evaluation_options(parser)
return parse_and_load_from_model(parser)
| AGRoL-main | utils/parser_util.py |
# MIT License
# Copyright (c) 2022 ETH Sensing, Interaction & Perception Lab
#
# This code is based on https://github.com/eth-siplab/AvatarPoser
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import os
import cv2
import numpy as np
import trimesh
from body_visualizer.mesh.mesh_viewer import MeshViewer
from body_visualizer.tools.vis_tools import colors
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from tqdm import tqdm
os.environ["PYOPENGL_PLATFORM"] = "egl"
class CheckerBoard:
def __init__(self, white=(247, 246, 244), black=(146, 163, 171)):
self.white = np.array(white) / 255.0
self.black = np.array(black) / 255.0
self.verts, self.faces, self.texts = None, None, None
self.offset = None
@staticmethod
def gen_checker_xy(black, white, square_size=0.5, xlength=50.0, ylength=50.0):
"""
        generate a checkerboard parallel to the x-y plane,
        extending from (0, 0) to (xlength, ylength), in meters
return: trimesh.Trimesh
"""
xsquares = int(xlength / square_size)
ysquares = int(ylength / square_size)
verts, faces, texts = [], [], []
fcount = 0
for i in range(xsquares):
for j in range(ysquares):
p1 = np.array([i * square_size, j * square_size, 0])
p2 = np.array([(i + 1) * square_size, j * square_size, 0])
p3 = np.array([(i + 1) * square_size, (j + 1) * square_size, 0])
verts.extend([p1, p2, p3])
faces.append([fcount * 3, fcount * 3 + 1, fcount * 3 + 2])
fcount += 1
p1 = np.array([i * square_size, j * square_size, 0])
p2 = np.array([(i + 1) * square_size, (j + 1) * square_size, 0])
p3 = np.array([i * square_size, (j + 1) * square_size, 0])
verts.extend([p1, p2, p3])
faces.append([fcount * 3, fcount * 3 + 1, fcount * 3 + 2])
fcount += 1
if (i + j) % 2 == 0:
texts.append(black)
texts.append(black)
else:
texts.append(white)
texts.append(white)
# now compose as mesh
mesh = trimesh.Trimesh(
            vertices=np.array(verts) + np.array([-5, -5, 0]),
            faces=np.array(faces),
            process=False,
            face_colors=np.array(texts),
        )
return mesh
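# Hedged usage sketch (not part of the original file): gen_checker_xy builds the floor
# mesh that save_animation below composes with the body mesh. The extent and square size
# here are illustrative values only.
if __name__ == "__main__":
    board = CheckerBoard()
    floor = board.gen_checker_xy(
        board.black, board.white, square_size=0.5, xlength=10.0, ylength=10.0
    )
    # 20 x 20 squares, two triangles each: expect (2400, 3) vertices and (800, 3) faces.
    print(floor.vertices.shape, floor.faces.shape)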
"""
# --------------------------------
# Visualize avatar using body pose information and body model
# --------------------------------
"""
def save_animation(body_pose, savepath, bm, fps=60, resolution=(800, 800)):
imw, imh = resolution
mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
faces = c2c(bm.f)
img_array = []
for fId in tqdm(range(body_pose.v.shape[0])):
body_mesh = trimesh.Trimesh(
vertices=c2c(body_pose.v[fId]),
faces=faces,
vertex_colors=np.tile(colors["purple"], (6890, 1)),
)
generator = CheckerBoard()
checker_mesh = generator.gen_checker_xy(generator.black, generator.white)
body_mesh.apply_transform(
trimesh.transformations.rotation_matrix(-90, (0, 0, 10))
)
body_mesh.apply_transform(
trimesh.transformations.rotation_matrix(30, (10, 0, 0))
)
body_mesh.apply_transform(trimesh.transformations.scale_matrix(0.5))
checker_mesh.apply_transform(
trimesh.transformations.rotation_matrix(-90, (0, 0, 10))
)
checker_mesh.apply_transform(
trimesh.transformations.rotation_matrix(30, (10, 0, 0))
)
checker_mesh.apply_transform(trimesh.transformations.scale_matrix(0.5))
mv.set_static_meshes([checker_mesh, body_mesh])
body_image = mv.render(render_wireframe=False)
body_image = body_image.astype(np.uint8)
body_image = cv2.cvtColor(body_image, cv2.COLOR_BGR2RGB)
img_array.append(body_image)
out = cv2.VideoWriter(savepath, cv2.VideoWriter_fourcc(*"DIVX"), fps, resolution)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
| AGRoL-main | utils/utils_visualize.py |
# MIT License
# Copyright (c) 2022 ETH Sensing, Interaction & Perception Lab
#
# This code is based on https://github.com/eth-siplab/AvatarPoser
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch
from human_body_prior.tools import tgm_conversion as tgm
from human_body_prior.tools.rotation_tools import aa2matrot, matrot2aa
from torch.nn import functional as F
def bgs(d6s):
d6s = d6s.reshape(-1, 2, 3).permute(0, 2, 1)
bsz = d6s.shape[0]
b1 = F.normalize(d6s[:, :, 0], p=2, dim=1)
a2 = d6s[:, :, 1]
c = torch.bmm(b1.view(bsz, 1, -1), a2.view(bsz, -1, 1)).view(bsz, 1) * b1
b2 = F.normalize(a2 - c, p=2, dim=1)
b3 = torch.cross(b1, b2, dim=1)
return torch.stack([b1, b2, b3], dim=-1)
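# Illustrative sanity check (not part of the original file): bgs is the Gram-Schmidt step
# that turns a 6D rotation representation into a valid rotation matrix, so for generic
# inputs its output has orthonormal columns and determinant +1.
if __name__ == "__main__":
    rot = bgs(torch.randn(4, 6))
    eye = torch.eye(3).expand(4, 3, 3)
    assert torch.allclose(rot.transpose(1, 2) @ rot, eye, atol=1e-5)
    assert torch.allclose(torch.linalg.det(rot), torch.ones(4), atol=1e-5)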
def matrot2sixd(pose_matrot):
"""
:param pose_matrot: Nx3x3
:return: pose_6d: Nx6
"""
pose_6d = torch.cat([pose_matrot[:, :3, 0], pose_matrot[:, :3, 1]], dim=1)
return pose_6d
def aa2sixd(pose_aa):
"""
:param pose_aa Nx3
:return: pose_6d: Nx6
"""
pose_matrot = aa2matrot(pose_aa)
pose_6d = matrot2sixd(pose_matrot)
return pose_6d
def sixd2matrot(pose_6d):
"""
:param pose_6d: Nx6
:return: pose_matrot: Nx3x3
"""
rot_vec_1 = pose_6d[:, :3]
rot_vec_2 = pose_6d[:, 3:6]
rot_vec_3 = torch.cross(rot_vec_1, rot_vec_2)
pose_matrot = torch.stack([rot_vec_1, rot_vec_2, rot_vec_3], dim=-1)
return pose_matrot
def sixd2aa(pose_6d, batch=False):
"""
:param pose_6d: Nx6
:return: pose_aa: Nx3
"""
if batch:
B, J, C = pose_6d.shape
pose_6d = pose_6d.reshape(-1, 6)
pose_matrot = sixd2matrot(pose_6d)
pose_aa = matrot2aa(pose_matrot)
if batch:
pose_aa = pose_aa.reshape(B, J, 3)
return pose_aa
def sixd2quat(pose_6d):
"""
:param pose_6d: Nx6
:return: pose_quaternion: Nx4
"""
pose_mat = sixd2matrot(pose_6d)
pose_mat_34 = torch.cat(
(pose_mat, torch.zeros(pose_mat.size(0), pose_mat.size(1), 1)), dim=-1
)
pose_quaternion = tgm.rotation_matrix_to_quaternion(pose_mat_34)
return pose_quaternion
def quat2aa(pose_quat):
"""
:param pose_quat: Nx4
:return: pose_aa: Nx3
"""
return tgm.quaternion_to_angle_axis(pose_quat)
| AGRoL-main | utils/utils_transform.py |
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
"""
Helpers for distributed training.
"""
import socket
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
used_device = 0
def setup_dist(device=0):
"""
Setup a distributed process group.
"""
global used_device
used_device = device
if dist.is_initialized():
return
def dev():
"""
Get the device to use for torch.distributed.
"""
global used_device
if th.cuda.is_available() and used_device >= 0:
return th.device(f"cuda:{used_device}")
return th.device("cpu")
def load_state_dict(path, **kwargs):
"""
Load a PyTorch file without redundant fetches across MPI ranks.
"""
return th.load(path, **kwargs)
def sync_params(params):
"""
Synchronize a sequence of Tensors across ranks from rank 0.
"""
for p in params:
with th.no_grad():
dist.broadcast(p, 0)
def _find_free_port():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
finally:
s.close()
| AGRoL-main | utils/dist_util.py |
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import numpy as np
import torch as th
from .diffusion_model import DiffusionModel
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
For example, if there's 300 timesteps and the section counts are [10,15,20]
then the first 100 timesteps are strided to be 10 timesteps, the second 100
are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(
f"cannot create exactly {num_timesteps} steps with an integer stride"
)
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
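# Illustrative usage (not part of the original file), matching the docstring above: three
# sections of a 300-step process keep 10, 15 and 20 steps respectively, and "ddimN" keeps
# N evenly strided steps when an integer stride exists.
if __name__ == "__main__":
    kept = space_timesteps(300, [10, 15, 20])
    assert len(kept) == 45 and min(kept) == 0 and max(kept) < 300
    assert space_timesteps(1000, "ddim50") == set(range(0, 1000, 20))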
class SpacedDiffusion(DiffusionModel):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = DiffusionModel(**kwargs)
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, sparse, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, sparse, **kwargs)
| AGRoL-main | diffusion/respace.py |