# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import models.resunet as resunet
import models.res16unet as res16unet
MODELS = []
def add_models(module):
MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a])
add_models(resunet)
add_models(res16unet)
def get_models():
'''Returns the list of available model classes.'''
return MODELS
def load_model(name):
'''Returns the model class given its class name.'''
# Find the model class from its name
all_models = get_models()
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
print('Invalid model name. Options are:')
# Display a list of valid model names
for model in all_models:
print('\t* {}'.format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
| ContrastiveSceneContexts-main | downstream/semseg/models/__init__.py |
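# A minimal, hypothetical usage sketch for the registry above (it assumes this directory is
# importable as the `models` package, as the trainer below does): load_model returns the
# model class itself, not an instance, and prints the valid names when the lookup fails.
from models import load_model

NetClass = load_model('Res16UNet34C')   # returns the class object
print(NetClass.__name__)                # -> 'Res16UNet34C'
load_model('NoSuchNet')                 # prints the list of valid model names, returns None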
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU, SparseTensor
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, call initialize_coords before the forward pass.
# Once the data is processed, call clear to reset the model before calling initialize_coords again.
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.optimizer.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
print("building model, ", in_channels)
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.net.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
self.relu = MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# pixel_dist=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# pixel_dist=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
# pixel_dist=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
# pixel_dist=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
# pixel_dist=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
return self.final(out), out
class Res16UNet14(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet50(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet101(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class Res16UNet14A(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet14A2(Res16UNet14A):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet14B2(Res16UNet14B):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B3(Res16UNet14B):
LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)
class Res16UNet14C(Res16UNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class Res16UNet14D(Res16UNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet18B(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet18D(Res16UNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet34A(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class Res16UNet34B(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class STRes16UNetBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STRes16UNet14(STRes16UNetBase, Res16UNet14):
pass
class STRes16UNet14A(STRes16UNetBase, Res16UNet14A):
pass
class STRes16UNet18(STRes16UNetBase, Res16UNet18):
pass
class STRes16UNet34(STRes16UNetBase, Res16UNet34):
pass
class STRes16UNet50(STRes16UNetBase, Res16UNet50):
pass
class STRes16UNet101(STRes16UNetBase, Res16UNet101):
pass
class STRes16UNet18A(STRes16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class STResTesseract16UNetBase(STRes16UNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
pass
| ContrastiveSceneContexts-main | downstream/semseg/models/res16unet.py |
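# A hedged construction sketch for the networks above. The config carries only the two fields
# read in network_initialization (config.net.conv1_kernel_size, config.optimizer.bn_momentum);
# the real Hydra configs in this repo contain many more entries.
from omegaconf import OmegaConf
from models.res16unet import Res16UNet34C

cfg = OmegaConf.create({'net': {'conv1_kernel_size': 3},
                        'optimizer': {'bn_momentum': 0.02}})
model = Res16UNet34C(in_channels=3, out_channels=20, config=cfg, D=3)
print(sum(p.numel() for p in model.parameters()))   # trainable parameter count

# Forward pass sketch: x is an ME.SparseTensor built by the dataloader (see lib/test.py below);
# the model returns a tuple of (per-voxel logits, last U-Net feature map).
# logits, feats = model(x)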
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from MinkowskiEngine import MinkowskiNetwork
class Model(MinkowskiNetwork):
"""
Base network for all sparse convnets.
By default, all networks are segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class HighDimensionalModel(Model):
"""
Base network for all spatio(-temporal) chromatic sparse convnets.
"""
def __init__(self, in_channels, out_channels, config, D, **kwargs):
assert D > 4, "Num dimension must be at least 5"
super(HighDimensionalModel, self).__init__(in_channels, out_channels, config, D, **kwargs)
| ContrastiveSceneContexts-main | downstream/semseg/models/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from models.common import get_norm
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MEF
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = 'BN'
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=3, stride=stride, dimension=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D)
self.conv2 = ME.MinkowskiConvolution(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
has_bias=False,
dimension=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = MEF.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = MEF.relu(out)
return out
class BasicBlockBN(BasicBlockBase):
NORM_TYPE = 'BN'
class BasicBlockIN(BasicBlockBase):
NORM_TYPE = 'IN'
def get_block(norm_type,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
D=3):
if norm_type == 'BN':
return BasicBlockBN(inplanes, planes, stride, dilation, downsample, bn_momentum, D)
elif norm_type == 'IN':
return BasicBlockIN(inplanes, planes, stride, dilation, downsample, bn_momentum, D)
else:
raise ValueError(f"Norm type '{norm_type}' is not defined")
| ContrastiveSceneContexts-main | downstream/semseg/models/residual_block.py |
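# A small, hypothetical stand-alone sketch of the get_block factory above; it assumes a
# MinkowskiEngine version matching the has_bias/dimension-style API used in this file.
from models.residual_block import get_block

block_bn = get_block('BN', inplanes=32, planes=32, D=3)   # BasicBlockBN (BatchNorm)
block_in = get_block('IN', inplanes=32, planes=32, D=3)   # BasicBlockIN (InstanceNorm)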
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import MinkowskiEngine as ME
def get_norm(norm_type, num_feats, bn_momentum=0.05, D=-1):
if norm_type == 'BN':
return ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum)
elif norm_type == 'IN':
return ME.MinkowskiInstanceNorm(num_feats, dimension=D)
else:
raise ValueError(f"Norm type '{norm_type}' is not defined")
| ContrastiveSceneContexts-main | downstream/semseg/models/common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import MinkowskiEngine as ME
from models.model import Model
from models.modules.common import ConvType, NormType, get_norm, conv, sum_pool
from models.modules.resnet_block import BasicBlock, Bottleneck
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D)
self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1))
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1))
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1))
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1))
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D),
get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
class ResNet14(ResNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1)
class ResNet18(ResNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2)
class ResNet34(ResNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3)
class ResNet50(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3)
class ResNet101(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3)
class STResNetBase(ResNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResNet14(STResNetBase, ResNet14):
pass
class STResNet18(STResNetBase, ResNet18):
pass
class STResNet34(STResNetBase, ResNet34):
pass
class STResNet50(STResNetBase, ResNet50):
pass
class STResNet101(STResNetBase, ResNet101):
pass
class STResTesseractNetBase(STResNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractNet14(STResTesseractNetBase, STResNet14):
pass
class STResTesseractNet18(STResTesseractNetBase, STResNet18):
pass
class STResTesseractNet34(STResTesseractNetBase, STResNet34):
pass
class STResTesseractNet50(STResTesseractNetBase, STResNet50):
pass
class STResTesseractNet101(STResTesseractNetBase, STResNet101):
pass
| ContrastiveSceneContexts-main | downstream/semseg/models/resnet.py |
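# A hedged instantiation sketch for the plain ResNets above. Note that ResNetBase reads a flat
# config (config.dilations, config.bn_momentum, config.conv1_kernel_size), unlike Res16UNetBase
# earlier, which reads config.net.* and config.optimizer.*; the values here are hypothetical.
from omegaconf import OmegaConf
from models.resnet import ResNet18

cfg = OmegaConf.create({'dilations': [1, 1, 1, 1],
                        'bn_momentum': 0.02,
                        'conv1_kernel_size': 5})
net = ResNet18(in_channels=3, out_channels=20, config=cfg, D=3)
print(net.OUT_PIXEL_DIST)   # 32: overall stride of the encoder output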
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from models.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BasicBlockIN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BasicBlockINBN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckIN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BottleneckINBN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
| ContrastiveSceneContexts-main | downstream/semseg/models/modules/resnet_block.py |
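# A brief, hypothetical sketch of the two block types above. The point to note is
# Bottleneck.expansion == 4: its output has planes * 4 channels, which is why
# ResNetBase._make_layer adds a 1x1 downsample branch and tracks planes * block.expansion.
from models.modules.resnet_block import BasicBlock, Bottleneck

basic = BasicBlock(inplanes=32, planes=32, D=3)     # output width: 32
bottle = Bottleneck(inplanes=32, planes=32, D=3)    # output width: 32 * 4 = 128
# When the input and output widths differ (as for `bottle`), a downsample module must be
# supplied so the residual addition matches, exactly as _make_layer does.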
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import MinkowskiEngine as ME
from models.modules.common import ConvType, NormType
from models.modules.resnet_block import BasicBlock, Bottleneck
class SELayer(nn.Module):
def __init__(self, channel, reduction=16, D=-1):
# Global coords does not require coords_key
super(SELayer, self).__init__()
self.fc = nn.Sequential(
ME.MinkowskiLinear(channel, channel // reduction), ME.MinkowskiReLU(inplace=True),
ME.MinkowskiLinear(channel // reduction, channel), ME.MinkowskiSigmoid())
self.pooling = ME.MinkowskiGlobalPooling(dimension=D)
self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D)
def forward(self, x):
y = self.pooling(x)
y = self.fc(y)
return self.broadcast_mul(x, y)
class SEBasicBlock(BasicBlock):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
reduction=16,
D=-1):
super(SEBasicBlock, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBasicBlockSN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBasicBlockIN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBasicBlockLN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
class SEBottleneck(Bottleneck):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
D=3,
reduction=16):
super(SEBottleneck, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes * self.expansion, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneckSN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBottleneckIN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBottleneckLN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
| ContrastiveSceneContexts-main | downstream/semseg/models/modules/senet_block.py |
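# A hedged sketch of the squeeze-and-excitation gate above: global pooling yields one descriptor
# per point cloud, the two MinkowskiLinear layers squeeze and re-expand it, and the result is
# broadcast-multiplied back onto every point. The dimension= arguments follow the older
# MinkowskiEngine API targeted by this file.
from models.modules.senet_block import SELayer

se = SELayer(channel=64, reduction=16, D=3)
# y = se(x)   # x: a SparseTensor with 64-channel features; y keeps the same coordinates/shape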
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| ContrastiveSceneContexts-main | downstream/semseg/models/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc
from enum import Enum
import torch.nn as nn
import MinkowskiEngine as ME
class NormType(Enum):
BATCH_NORM = 0
INSTANCE_NORM = 1
INSTANCE_BATCH_NORM = 2
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
if norm_type == NormType.BATCH_NORM:
return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
elif norm_type == NormType.INSTANCE_NORM:
return ME.MinkowskiInstanceNorm(n_channels)
elif norm_type == NormType.INSTANCE_BATCH_NORM:
return nn.Sequential(
ME.MinkowskiInstanceNorm(n_channels),
ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum))
else:
raise ValueError(f'Norm type: {norm_type} not supported')
class ConvType(Enum):
"""
Define the kernel region type
"""
HYPERCUBE = 0, 'HYPERCUBE'
SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE'
SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE'
HYPERCROSS = 3, 'HYPERCROSS'
SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS'
SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS'
SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS'
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
# Convert the ConvType var to a RegionType var
conv_to_region_type = {
# kernel_size = [k, k, k, 1]
ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID
}
int_to_region_type = {m.value: m for m in ME.RegionType}
def convert_region_type(region_type):
"""
Convert the integer region_type to the corresponding RegionType enum object.
"""
return int_to_region_type[region_type]
def convert_conv_type(conv_type, kernel_size, D):
assert isinstance(conv_type, ConvType), "conv_type must be of ConvType"
region_type = conv_to_region_type[conv_type]
axis_types = None
if conv_type == ConvType.SPATIAL_HYPERCUBE:
# No temporal convolution
if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.HYPERCUBE:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIAL_HYPERCROSS:
if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.HYPERCROSS:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS:
# Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim
axis_types = [
ME.RegionType.HYPERCUBE,
] * 3
if D == 4:
axis_types.append(ME.RegionType.HYPERCROSS)
return region_type, axis_types, kernel_size
def conv(in_planes,
out_planes,
kernel_size,
stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiConvolution(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def conv_tr(in_planes,
out_planes,
kernel_size,
upsample_stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
upsample_stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D)
return ME.MinkowskiConvolutionTranspose(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=upsample_stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def avg_pool(kernel_size,
stride=1,
dilation=1,
conv_type=ConvType.HYPERCUBE,
in_coords_key=None,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgUnpooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiSumPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
| ContrastiveSceneContexts-main | downstream/semseg/models/modules/common.py |
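# A short sketch of the two factory helpers above with hypothetical sizes: conv builds a spatial
# hypercube MinkowskiConvolution and conv_tr the matching stride-2 transposed convolution, both
# configured through a KernelGenerator so the kernel shape can differ per axis (the HYBRID case
# used for SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS).
from models.modules.common import ConvType, conv, conv_tr

down = conv(3, 32, kernel_size=5, stride=1,
            conv_type=ConvType.SPATIAL_HYPERCUBE, D=3)
up = conv_tr(32, 32, kernel_size=2, upsample_stride=2,
             conv_type=ConvType.SPATIAL_HYPERCUBE, D=3)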
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import numpy as np
import glob
import time
import argparse
import pykeops
from pykeops.torch import LazyTensor
pykeops.clean_pykeops()
def parse_args():
"""parse input arguments"""
parser = argparse.ArgumentParser(description='data_efficient3d')
parser.add_argument('--point_data', type=str, default='/checkpoint/jihou/data/scannet/pointcloud/')
parser.add_argument('--feat_data', type=str, default='/checkpoint/jihou/checkpoint/scannet/pretrain/partition8_4096_60k/1/outputs/feat')
parser.add_argument('--num_points', type=int, default=100)
parser.add_argument('--num_iters', type=int, default=50)
parser.add_argument('--output', type=str, default='./output')
return parser.parse_args()
def kmeans(pointcloud, k=10, iterations=10, verbose=True):
n, dim = pointcloud.shape # Number of samples, dimension of the ambient space
start = time.time()
clusters = pointcloud[:k, :].clone() # Simplistic random initialization
pointcloud_cuda = LazyTensor(pointcloud[:, None, :]) # (Npoints, 1, D)
# K-means loop:
for _ in range(iterations):
clusters_previous = clusters.clone()
clusters_gpu = LazyTensor(clusters[None, :, :]) # (1, Nclusters, D)
distance_matrix = ((pointcloud_cuda - clusters_gpu) ** 2).sum(-1) # (Npoints, Nclusters) symbolic matrix of squared distances
closest_clusters = distance_matrix.argmin(dim=1).long().view(-1) # Points -> Nearest cluster
# #points for each cluster
clusters_count = torch.bincount(closest_clusters, minlength=k).float() # Class weights
for d in range(dim): # Compute the cluster centroids with torch.bincount:
clusters[:, d] = torch.bincount(closest_clusters, weights=pointcloud[:, d], minlength=k) / clusters_count
# for clusters that have no points assigned
mask = clusters_count == 0
clusters[mask] = clusters_previous[mask]
end = time.time()
if verbose:
print("K-means example with {:,} points in dimension {:,}, K = {:,}:".format(n, dim, k))
print('Timing for {} iterations: {:.5f}s = {} x {:.5f}s\n'.format(
iterations, end - start, iterations, (end-start) / iterations))
# nearest neighbouring search for each cluster
closest_points_to_centers = distance_matrix.argmin(dim=0).long().view(-1)
return closest_points_to_centers
def kmeans_sampling(args):
pointcloud_names = glob.glob(os.path.join(args.point_data, "*.pth"))
sampled_inds = {}
for idx, pointcloud_name in enumerate(pointcloud_names):
print('{}/{}: {}'.format(idx, len(pointcloud_names), pointcloud_name))
pointcloud = torch.load(pointcloud_name)
scene_name = os.path.basename(pointcloud_name).split('.')[0]
coords = pointcloud[0].astype(np.float32)
colors = pointcloud[1].astype(np.int32)
candidates = []
candidates.append(coords)
candidates.append(colors)
feats = torch.load(os.path.join(args.feat_data, scene_name))
candidates.append(feats)
candidates = torch.from_numpy(np.concatenate(candidates,1)).cuda().float()
K = args.num_points
sampled_inds_per_scene = kmeans(candidates, K, args.num_iters).cpu().numpy()
sampled_inds[scene_name] = sampled_inds_per_scene
return sampled_inds
if __name__ == "__main__":
args = parse_args()
sampled_inds = kmeans_sampling(args)
torch.save(sampled_inds, args.output)
| ContrastiveSceneContexts-main | downstream/semseg/lib/sampling_points.py |
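# A hedged usage sketch for the pykeops k-means above (it needs a CUDA device and a working
# pykeops install): cluster 6-D points (xyz + rgb) into 100 groups and get, for each cluster,
# the index of the input point closest to its centroid, the quantity kmeans_sampling stores.
import torch
from lib.sampling_points import kmeans

points = torch.rand(20000, 6).cuda()          # hypothetical xyz + rgb features
sampled_idx = kmeans(points, k=100, iterations=20)
print(sampled_idx.shape)                      # -> torch.Size([100])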
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| ContrastiveSceneContexts-main | downstream/semseg/lib/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from matplotlib.pyplot import *
from PIL import Image
colors = [ 'xkcd:blue',
'xkcd:red',
'xkcd:purple',
'xkcd:orchid',
'xkcd:orange',
'xkcd:grey',
'xkcd:teal',
'xkcd:sienna',
'xkcd:azure',
'xkcd:green',
'xkcd:black',
'xkcd:goldenrod']
def bar_plot_insseg(image_name='bar_insseg.png'):
labels = ['20 Points', '50 Points', '100 Points', '200 points']
RAND = [14.6, 21.6, 34.0, 43.5]
Kmeans = [15.6, 24.3, 35.7, 42.3]
OURS_I = [26.3, 32.6, 39.9, 48.9]
OURS_S = [25.8, 32.5, 44.2, 48.3]
OURS_IS = [27.2, 35.7, 43.6, 50.4]
x = np.array([0,2,4,6]) # the label locations
width = 1.7 # the width of the bars
font = {'family' : 'Times New Roman',
'size' : 11}
matplotlib.rc('font', **font)
fig, ax = plt.subplots(1,1)
fig.set_size_inches(5.5, 4.5)
rects1 = ax.bar(x - width*2/5, RAND, width/5, label='RAND')
rects2 = ax.bar(x - width*1/5, Kmeans, width/5, label='Kmeans')
rects3 = ax.bar(x , OURS_I, width/5, label='OURS_I')
rects4 = ax.bar(x + width*1/5, OURS_S, width/5, label='OURS_S')
rects5 = ax.bar(x + width*2/5, OURS_IS, width/5, label='OURS_IS')
#rects1 = ax.bar(x - width*2/4, points20, width/4, label='20')
#rects2 = ax.bar(x - width/4, points50, width/4, label='50')
#rects3 = ax.bar(x + width/4, points100, width/4, label='100')
#rects4 = ax.bar(x + width*2/4, points200, width/4, label='200')
ax.plot(np.arange(len(labels)+15)-2, [56.9]*(len(x)+15), '--', linewidth=2.25, color=colors[-1])
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('mAP@0.5')
ax.set_xlabel('Number of Annotated Points Per Scene')
ax.set(xlim=[-1, 7], ylim=[0, 61])
#ax.set_title('')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.text(1.5, 58, '150,000 Annotated Points Per Scene', fontsize=8)
ax.legend(loc=2)
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",fontsize=6,
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
autolabel(rects4)
autolabel(rects5)
fig.tight_layout()
plt.show()
fig.savefig(image_name, dpi=600)
image = Image.open(image_name)
image.save(image_name)
def bar_plot_sem(image_name='bar_semseg.png'):
#labels = ['RAND', 'KMEANS', 'OURS_I', 'OURS_S', 'OURS_IS']
#points20 = [41.9, 45.9, 53.6, 55.5, 53.8]
#points50 = [53.9, 55.4, 60.7, 60.5, 62.9]
#points100 = [62.2, 60.6, 65.7, 65.9, 66.9]
#points200 = [65.5, 64.3, 68.2, 68.2, 69.0]
labels = ['20 Points', '50 Points', '100 Points', '200 points']
RAND = [41.9, 53.9, 62.2, 65.5]
Kmeans = [45.9, 55.4, 60.6, 64.3]
OURS_I = [55.5, 60.5, 65.9, 68.2]
OURS_S = [53.6, 60.7, 65.7, 68.2]
OURS_IS = [53.8, 62.9, 66.9, 69.0]
x = np.array([0,2,4,6]) # the label locations
width = 1.7 # the width of the bars
font = {'family' : 'Times New Roman',
'size' : 11}
matplotlib.rc('font', **font)
fig, ax = plt.subplots(1,1)
fig.set_size_inches(5.5, 4.5)
rects1 = ax.bar(x - width*2/5, RAND, width/5, label='RAND')
rects2 = ax.bar(x - width*1/5, Kmeans, width/5, label='Kmeans')
rects3 = ax.bar(x , OURS_I, width/5, label='OURS_I')
rects4 = ax.bar(x + width*1/5, OURS_S, width/5, label='OURS_S')
rects5 = ax.bar(x + width*2/5, OURS_IS, width/5, label='OURS_IS')
#rects1 = ax.bar(x - width*2/4, points20, width/4, label='20')
#rects2 = ax.bar(x - width/4, points50, width/4, label='50')
#rects3 = ax.bar(x + width/4, points100, width/4, label='100')
#rects4 = ax.bar(x + width*2/4, points200, width/4, label='200')
ax.plot(np.arange(len(labels)+15)-2, [72.2]*(len(x)+15), '--', linewidth=2.25, color=colors[-1])
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('mIoU')
ax.set_xlabel('Number of Annotated Points Per Scene')
ax.set(xlim=[-1, 7], ylim=[40, 75])
#ax.set_title('')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.text(1.5, 73, '150,000 Annotated Points Per Scene', fontsize=8)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",fontsize=6,
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
autolabel(rects4)
autolabel(rects5)
fig.tight_layout()
plt.show()
fig.savefig(image_name, dpi=600)
image = Image.open(image_name)
image.save(image_name)
def plot_curves(curves,
xlabel='% Dataset Labeled\n(ScanNet-5-Recon)',
xlim=[4, 36],
xticks=np.arange(5, 35, 5),
xticklabels=None,
ylabel='mIoU',
ylim=[0.2, 0.65],
yticks=np.arange(0.2, 0.65, 0.05),
if_grid=True,
image_name='test.png'):
font = {'family' : 'Times New Roman',
'size' : 11}
matplotlib.rc('font', **font)
fig, subplot = plt.subplots(1,1)
fig.set_size_inches(8.0, 4.0)
subplot.set(xlim=xlim, ylim=ylim, xlabel=xlabel, ylabel=ylabel)
subplot.set(xticks=xticks, yticks=yticks)
if xticklabels:
subplot.axes.set_xticklabels(xticklabels)
subplot.grid(if_grid)
for idx, curve in enumerate(curves):
name = ''
fmt=''
marker = ''
markersize = 10
linewidth=4.0
color = colors[idx%len(colors)]
if 'name' in curve:
name = curve['name']
if 'marker' in curve:
marker = curve['marker']
if 'markersize' in curve:
markersize = curve['markersize']
if 'color' in curve:
color = curve['color']
if 'linewidth' in curve:
linewidth = curve['linewidth']
if 'fmt' in curve:
fmt = curve['fmt']
x = curve['x']
y = curve['y']
subplot.plot(x, y, fmt, label=name, marker=marker, markersize=markersize, linewidth=linewidth, color=color)
subplot.legend(loc='best')
fig.tight_layout()
plt.show()
fig.savefig(image_name, dpi=600)
image = Image.open(image_name)
w, h = image.size
image.crop((75, 75, w - 75, h - 60)).save(image_name)
def shape_contexts_ablation():
'''
Variants & 1024 & 2048 & 4096 \\
\hline
1 & 59.7 & 60.7 & 60.1 \\
2 & 61.4 & 61.6 & 61.9 \\
4 & 61.7 & 61.8 & 63.0 \\
8 & 61.2 & 62.1 & 63.4 \\
'''
data = [
{'name': 'No scene contexts', 'x': [1, 2, 3, 4], 'y': [60.5, 60.7, 60.1, 60.6], 'marker': 'o'},
{'name': '2 Partitions', 'x': [2, 4, 6, 8], 'y': [61.4, 61.6, 61.9, 61.9], 'marker': 'o'},
{'name': '4 Partitions', 'x': [2, 4, 6, 8], 'y': [61.7, 61.8, 63.0, 62.9], 'marker': '^'},
{'name': '8 Partitions', 'x': [2, 4, 6, 8], 'y': [61.2, 62.1, 63.4, 63.5], 'marker': 's'},
{'name': '16 Partitions', 'x': [2, 4, 6], 'y': [61.1, 61.9, 62.6], 'marker': 'p'},
{'name': '32 Partitions', 'x': [2, 4, 6], 'y': [60.9, 61.7, 62.1], 'marker': '*'},
]
plot_curves(curves=data,
xlabel='Number of Points',
ylabel='mAP@0.5',
xlim=[1.9,8.1],
xticks=[2,4,6,8],
xticklabels=[1024, 2048, 4096, 8192],
ylim=[60.9, 63.5],
yticks=[60.9, 61.5, 62.5, 63.5],
if_grid=True,
image_name='shape_context_ablation.jpg')
def bar_plot_active(image_name='bar_active.png'):
labels = ['20 Points', '50 Points', '100 Points', '200 points']
#kmeans = [734, 1034, 1386, 1688]
#act = [1151, 1726, 2153, 2456]
#total 2873
kmeans = [0.255, 0.36, 0.482, 0.588]
act = [0.401, 0.601, 0.749, 0.855]
x = np.array([0,2,4,6]) # the label locations
width = 1.7 # the width of the bars
font = {'family' : 'Times New Roman',
'size' : 11}
matplotlib.rc('font', **font)
fig, ax = plt.subplots(1,1)
fig.set_size_inches(8, 4)
rects1 = ax.bar(x , kmeans, width/2, label='kmeans sampling (xyz+rgb)')
rects2 = ax.bar(x + width*1/2, act, width/2, label='act. labeling')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Percentage of Distinct Objects')
ax.set_xlabel('Number of Annotated Points Per Scene')
ax.set(xlim=[-1, 8], ylim=[0.2, 0.9])
# manipulate
vals = ax.get_yticks()
ax.set_yticklabels(['{:.0%}'.format(x) for x in vals])
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.grid(False)
ax.legend(loc=2)
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{:.1%}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",fontsize=10,
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
plt.show()
fig.savefig(image_name, dpi=600)
image = Image.open(image_name)
image.save(image_name)
if __name__=='__main__':
#shape_contexts_ablation()
#bar_plot_insseg()
#bar_plot_sem()
#bar_plot_active()
pass
| ContrastiveSceneContexts-main | downstream/semseg/lib/plot_graph.py |
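# A minimal sketch of the curve-plotting helper above with made-up numbers, purely to show the
# expected dictionary format; every key other than 'x' and 'y' is optional.
from lib.plot_graph import plot_curves

curves = [
    {'name': 'baseline',   'x': [5, 10, 20, 30], 'y': [0.30, 0.42, 0.51, 0.58], 'marker': 'o'},
    {'name': 'pretrained', 'x': [5, 10, 20, 30], 'y': [0.38, 0.48, 0.56, 0.61], 'marker': '^'},
]
plot_curves(curves, image_name='example_curves.png')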
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import LambdaLR, StepLR
class LambdaStepLR(LambdaLR):
def __init__(self, optimizer, lr_lambda, last_step=-1):
super(LambdaStepLR, self).__init__(optimizer, lr_lambda, last_step)
@property
def last_step(self):
"""Use last_epoch for the step counter"""
return self.last_epoch
@last_step.setter
def last_step(self, v):
self.last_epoch = v
class PolyLR(LambdaStepLR):
"""DeepLab learning rate policy"""
def __init__(self, optimizer, max_iter, power=0.9, last_step=-1):
super(PolyLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**power, last_step)
class SquaredLR(LambdaStepLR):
""" Used for SGD Lars"""
def __init__(self, optimizer, max_iter, last_step=-1):
super(SquaredLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**2, last_step)
class ExpLR(LambdaStepLR):
def __init__(self, optimizer, step_size, gamma=0.9, last_step=-1):
# (0.9 ** 21.854) = 0.1, (0.95 ** 44.8906) = 0.1
# To get 0.1 every N using gamma 0.9, N * log(0.9)/log(0.1) = 0.04575749 N
# To get 0.1 every N using gamma g, g ** N = 0.1 -> N * log(g) = log(0.1) -> g = np.exp(log(0.1) / N)
super(ExpLR, self).__init__(optimizer, lambda s: gamma**(s / step_size), last_step)
def initialize_optimizer(params, config):
assert config.optimizer in ['SGD', 'Adagrad', 'Adam', 'RMSProp', 'Rprop', 'SGDLars']
if config.optimizer == 'SGD':
return SGD(
params,
lr=config.lr,
momentum=config.sgd_momentum,
dampening=config.sgd_dampening,
weight_decay=config.weight_decay)
elif config.optimizer == 'Adam':
return Adam(
params,
lr=config.lr,
betas=(config.adam_beta1, config.adam_beta2),
weight_decay=config.weight_decay)
else:
logging.error('Optimizer type not supported')
raise ValueError('Optimizer type not supported')
def initialize_scheduler(optimizer, config, last_step=-1):
if config.scheduler == 'StepLR':
return StepLR(
optimizer, step_size=config.step_size, gamma=config.step_gamma, last_epoch=last_step)
elif config.scheduler == 'PolyLR':
return PolyLR(optimizer, max_iter=config.max_iter, power=config.poly_power, last_step=last_step)
elif config.scheduler == 'SquaredLR':
return SquaredLR(optimizer, max_iter=config.max_iter, last_step=last_step)
elif config.scheduler == 'ExpLR':
return ExpLR(
optimizer, step_size=config.exp_step_size, gamma=config.exp_gamma, last_step=last_step)
else:
logging.error('Scheduler not supported')
| ContrastiveSceneContexts-main | downstream/semseg/lib/solvers.py |
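# A hedged stand-alone sketch of the two helpers above. The SimpleNamespace carries only the
# fields they actually read; in SegmentationTrainer below the same object is config.optimizer
# from the Hydra config.
import torch
from types import SimpleNamespace
from lib.solvers import initialize_optimizer, initialize_scheduler

opt_cfg = SimpleNamespace(optimizer='SGD', lr=0.1, sgd_momentum=0.9,
                          sgd_dampening=0.1, weight_decay=1e-4,
                          scheduler='PolyLR', max_iter=60000, poly_power=0.9)
params = [torch.nn.Parameter(torch.zeros(10))]
optimizer = initialize_optimizer(params, opt_cfg)
scheduler = initialize_scheduler(optimizer, opt_cfg)
optimizer.step()
scheduler.step()   # lr decays as base_lr * (1 - step / (max_iter + 1)) ** 0.9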
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
import tempfile
import warnings
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import label_binarize
from datasets.evaluation.evaluate_semantic_label import Evaluator
from lib.utils import Timer, AverageMeter, precision_at_one, fast_hist, per_class_iu, \
get_prediction, get_torch_device, visualize_results, \
permute_pointcloud, save_rotation_pred
from MinkowskiEngine import SparseTensor
def print_info(iteration,
max_iteration,
data_time,
iter_time,
losses=None,
scores=None,
ious=None,
hist=None,
ap_class=None,
class_names=None):
debug_str = "{}/{}: ".format(iteration + 1, max_iteration)
debug_str += "Data time: {:.4f}, Iter time: {:.4f}".format(data_time, iter_time)
acc = hist.diagonal() / hist.sum(1) * 100
debug_str += "\tLoss {loss.val:.3f} (AVG: {loss.avg:.3f})\t" \
"Score {top1.val:.3f} (AVG: {top1.avg:.3f})\t" \
"mIOU {mIOU:.3f} mAP {mAP:.3f} mAcc {mAcc:.3f}\n".format(
loss=losses, top1=scores, mIOU=np.nanmean(ious),
mAP=np.nanmean(ap_class), mAcc=np.nanmean(acc))
if class_names is not None:
debug_str += "\nClasses: " + " ".join(class_names) + '\n'
debug_str += 'IOU: ' + ' '.join('{:.03f}'.format(i) for i in ious) + '\n'
debug_str += 'mAP: ' + ' '.join('{:.03f}'.format(i) for i in ap_class) + '\n'
debug_str += 'mAcc: ' + ' '.join('{:.03f}'.format(i) for i in acc) + '\n'
logging.info(debug_str)
def average_precision(prob_np, target_np):
num_class = prob_np.shape[1]
label = label_binarize(target_np, classes=list(range(num_class)))
with np.errstate(divide='ignore', invalid='ignore'):
return average_precision_score(label, prob_np, average=None)
def test(model, data_loader, config):
device = get_torch_device(config.misc.is_cuda)
dataset = data_loader.dataset
num_labels = dataset.NUM_LABELS
global_timer, data_timer, iter_timer = Timer(), Timer(), Timer()
criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label)
losses, scores, ious = AverageMeter(), AverageMeter(), 0
aps = np.zeros((0, num_labels))
hist = np.zeros((num_labels, num_labels))
logging.info('===> Start testing')
global_timer.tic()
data_iter = data_loader.__iter__()
max_iter = len(data_loader)
max_iter_unique = max_iter
#------------------------------- add -------------------------------------
VALID_CLASS_IDS = torch.FloatTensor(dataset.VALID_CLASS_IDS).long()
# Fix batch normalization running mean and std
model.eval()
# Clear cache (when run in val mode, cleanup training cache)
torch.cuda.empty_cache()
if config.test.save_features:
save_feat_dir = config.test.save_feat_dir
os.makedirs(save_feat_dir, exist_ok=True)
with torch.no_grad():
for iteration in range(max_iter):
data_timer.tic()
if config.data.return_transformation:
coords, input, target, transformation = data_iter.next()
else:
coords, input, target = data_iter.next()
transformation = None
data_time = data_timer.toc(False)
# Preprocess input
iter_timer.tic()
if config.net.wrapper_type != None:
color = input[:, :3].int()
if config.augmentation.normalize_color:
input[:, :3] = input[:, :3] / 255. - 0.5
sinput = SparseTensor(input, coords).to(device)
# Feed forward
inputs = (sinput,) if config.net.wrapper_type == None else (sinput, coords, color)
soutput, out_feats = model(*inputs)
output = soutput.F
pred = get_prediction(dataset, output, target).int()
if config.test.evaluate_benchmark:
# ---------------- point level -------------------
scene_id = dataset.get_output_id(iteration)
inverse_mapping = dataset.get_original_pointcloud(coords, transformation, iteration)
CLASS_MAP = np.array(dataset.VALID_CLASS_IDS)
pred_points = CLASS_MAP[pred.cpu().numpy()][inverse_mapping[0]]
# for benchmark
Evaluator.write_to_benchmark(scene_id=scene_id, pred_ids=pred_points)
iter_time = iter_timer.toc(False)
if config.test.save_features:
dataset.save_features(coords, out_feats.F, transformation, iteration, save_feat_dir)
target_np = target.numpy()
num_sample = target_np.shape[0]
target = target.to(device)
cross_ent = criterion(output, target.long())
losses.update(float(cross_ent), num_sample)
scores.update(precision_at_one(pred, target), num_sample)
hist += fast_hist(pred.cpu().numpy().flatten(), target_np.flatten(), num_labels)
ious = per_class_iu(hist) * 100
prob = torch.nn.functional.softmax(output, dim=1)
ap = average_precision(prob.cpu().detach().numpy(), target_np)
aps = np.vstack((aps, ap))
# Due to heavy bias in class, there exists class with no test label at all
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
ap_class = np.nanmean(aps, 0) * 100.
if iteration % config.test.test_stat_freq == 0 and iteration > 0:
reordered_ious = dataset.reorder_result(ious)
reordered_ap_class = dataset.reorder_result(ap_class)
class_names = dataset.get_classnames()
print_info(
iteration,
max_iter_unique,
data_time,
iter_time,
losses,
scores,
reordered_ious,
hist,
reordered_ap_class,
class_names=class_names)
if iteration % config.train.empty_cache_freq == 0:
# Clear cache
torch.cuda.empty_cache()
global_time = global_timer.toc(False)
reordered_ious = dataset.reorder_result(ious)
reordered_ap_class = dataset.reorder_result(ap_class)
class_names = dataset.get_classnames()
print_info(
iteration,
max_iter_unique,
data_time,
iter_time,
losses,
scores,
reordered_ious,
hist,
reordered_ap_class,
class_names=class_names)
logging.info("Finished test. Elapsed time: {:.4f}".format(global_time))
return losses.avg, scores.avg, np.nanmean(ap_class), np.nanmean(per_class_iu(hist)) * 100
| ContrastiveSceneContexts-main | downstream/semseg/lib/test.py |
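# A small sketch of the average_precision helper above with toy inputs (importing lib.test pulls
# in MinkowskiEngine and the dataset evaluator, so the rest of the repo must be importable).
# Classes that never occur in the targets give ill-defined per-class values, which is why the
# caller averages with np.nanmean inside a suppressed-RuntimeWarning block.
import numpy as np
from lib.test import average_precision

probs = np.random.rand(8, 20)
probs /= probs.sum(axis=1, keepdims=True)          # hypothetical softmax-like scores
targets = np.random.randint(0, 20, size=8)
ap_per_class = average_precision(probs, targets)   # shape (20,)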
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import logging
import os
import sys
import torch
import logging
import torch.nn.functional as F
from torch import nn
from torch.serialization import default_restore_location
from tensorboardX import SummaryWriter
from MinkowskiEngine import SparseTensor
from omegaconf import OmegaConf
from lib.distributed import get_world_size, all_gather, is_master_proc
from models import load_model
from lib.test import test as test_
from lib.solvers import initialize_optimizer, initialize_scheduler
from datasets import load_dataset
from datasets.dataset import initialize_data_loader
from lib.utils import checkpoint, precision_at_one, Timer, AverageMeter, get_prediction, load_state_with_same_shape, count_parameters
class SegmentationTrainer:
def __init__(self, config):
self.is_master = is_master_proc(config.misc.num_gpus) if config.misc.num_gpus > 1 else True
self.cur_device = torch.cuda.current_device()
# load the configurations
self.setup_logging()
if os.path.exists('config.yaml'):
logging.info('===> Loading existing config file')
config = OmegaConf.load('config.yaml')
logging.info('===> Loaded existing config file')
logging.info('===> Configurations')
logging.info(config.pretty())
# dataloader
DatasetClass = load_dataset(config.data.dataset)
logging.info('===> Initializing dataloader')
self.train_data_loader = initialize_data_loader(
DatasetClass, config, phase=config.train.train_phase,
num_workers=config.data.num_workers, augment_data=True,
shuffle=True, repeat=True, batch_size=config.data.batch_size // config.misc.num_gpus,
limit_numpoints=config.data.train_limit_numpoints)
self.val_data_loader = initialize_data_loader(
DatasetClass, config, phase=config.train.val_phase,
num_workers=config.data.num_val_workers, augment_data=False,
shuffle=False, repeat=False,
batch_size=1, limit_numpoints=False)
self.test_data_loader = initialize_data_loader(
DatasetClass, config, phase=config.test.test_phase,
num_workers=config.data.num_workers, augment_data=False,
shuffle=False, repeat=False,
batch_size=1, limit_numpoints=False)
# Model initialization
logging.info('===> Building model')
num_in_channel = self.train_data_loader.dataset.NUM_IN_CHANNEL
num_labels = self.train_data_loader.dataset.NUM_LABELS
NetClass = load_model(config.net.model)
model = NetClass(num_in_channel, num_labels, config)
logging.info('===> Number of trainable parameters: {}: {}'.format(NetClass.__name__, count_parameters(model)))
logging.info(model)
# Load weights if specified by the parameter.
if config.net.weights != '':
logging.info('===> Loading weights: ' + config.net.weights)
state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu'))
matched_weights = load_state_with_same_shape(model, state['state_dict'])
model_dict = model.state_dict()
model_dict.update(matched_weights)
model.load_state_dict(model_dict)
model = model.cuda()
if config.misc.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model, device_ids=[self.cur_device],
output_device=self.cur_device,
broadcast_buffers=False
)
self.config = config
self.model = model
if self.is_master:
self.writer = SummaryWriter(log_dir='tensorboard')
self.optimizer = initialize_optimizer(model.parameters(), config.optimizer)
self.scheduler = initialize_scheduler(self.optimizer, config.optimizer)
self.criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label)
checkpoint_fn = 'weights/weights.pth'
self.best_val_miou, self.best_val_miou_iter = 0,1
self.curr_iter, self.epoch, self.is_training = 1, 1, True
if os.path.isfile(checkpoint_fn):
logging.info("=> loading checkpoint '{}'".format(checkpoint_fn))
state = torch.load(checkpoint_fn, map_location=lambda s, l: default_restore_location(s, 'cpu'))
self.load_state(state['state_dict'])
self.curr_iter = state['iteration'] + 1
self.epoch = state['epoch']
self.scheduler = initialize_scheduler(self.optimizer, config.optimizer, last_step=self.curr_iter)
self.optimizer.load_state_dict(state['optimizer'])
if 'best_val_miou' in state:
self.best_val_miou = state['best_val_miou']
logging.info("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_fn, state['epoch']))
else:
logging.info("=> no weights.pth")
def setup_logging(self):
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.WARN)
if self.is_master:
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
def load_state(self, state):
if get_world_size() > 1:
_model = self.model.module
else:
_model = self.model
_model.load_state_dict(state)
def set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.config.misc.seed + self.curr_iter
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def test(self):
return test_(self.model, self.test_data_loader, self.config)
def validate(self):
val_loss, val_score, _, val_miou = test_(self.model, self.val_data_loader, self.config)
self.writer.add_scalar('val/miou', val_miou, self.curr_iter)
self.writer.add_scalar('val/loss', val_loss, self.curr_iter)
self.writer.add_scalar('val/precision_at_1', val_score, self.curr_iter)
if val_miou > self.best_val_miou:
self.best_val_miou = val_miou
self.best_val_miou_iter = self.curr_iter
checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config,
self.best_val_miou, "miou")
logging.info("Current best mIoU: {:.3f} at iter {}".format(self.best_val_miou, self.best_val_miou_iter))
checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config,
self.best_val_miou)
def train(self):
# Set up the train flag for batch normalization
self.model.train()
# Configuration
data_timer, iter_timer = Timer(), Timer()
fw_timer, bw_timer, ddp_timer = Timer(), Timer(), Timer()
data_time_avg, iter_time_avg = AverageMeter(), AverageMeter()
fw_time_avg, bw_time_avg, ddp_time_avg = AverageMeter(), AverageMeter(), AverageMeter()
scores = AverageMeter()
losses = {
'semantic_loss': AverageMeter(),
'total_loss': AverageMeter()
}
# Train the network
logging.info('===> Start training on {} GPUs, batch-size={}'.format(
get_world_size(), self.config.data.batch_size))
data_iter = self.train_data_loader.__iter__() # (distributed) infinite sampler
while self.is_training:
for _ in range(len(self.train_data_loader) // self.config.optimizer.iter_size):
self.optimizer.zero_grad()
data_time, batch_score = 0, 0
batch_losses = {
'semantic_loss': 0.0,
'offset_dir_loss': 0.0,
'offset_norm_loss': 0.0,
'total_loss': 0.0}
iter_timer.tic()
# set random seed for every iteration for trackability
self.set_seed()
for sub_iter in range(self.config.optimizer.iter_size):
# Get training data
data_timer.tic()
          if self.config.data.return_transformation:
            coords, input, target, _ = next(data_iter)
          else:
            coords, input, target = next(data_iter)
# Preprocess input
color = input[:, :3].int()
if self.config.augmentation.normalize_color:
input[:, :3] = input[:, :3] / 255. - 0.5
sinput = SparseTensor(input, coords).to(self.cur_device)
data_time += data_timer.toc(False)
# Feed forward
fw_timer.tic()
inputs = (sinput,)
soutput, _ = self.model(*inputs)
# The output of the network is not sorted
target = target.long().to(self.cur_device)
semantic_loss = self.criterion(soutput.F, target.long())
total_loss = semantic_loss
# Compute and accumulate gradient
total_loss /= self.config.optimizer.iter_size
pred = get_prediction(self.train_data_loader.dataset, soutput.F, target)
score = precision_at_one(pred, target)
# bp the loss
fw_timer.toc(False)
bw_timer.tic()
total_loss.backward()
bw_timer.toc(False)
# gather information
logging_output = {'total_loss': total_loss.item(), 'semantic_loss': semantic_loss.item(), 'score': score / self.config.optimizer.iter_size}
ddp_timer.tic()
if self.config.misc.num_gpus > 1:
logging_output = all_gather(logging_output)
logging_output = {w: np.mean([
a[w] for a in logging_output]
) for w in logging_output[0]}
batch_losses['total_loss'] += logging_output['total_loss']
batch_losses['semantic_loss'] += logging_output['semantic_loss']
batch_score += logging_output['score']
ddp_timer.toc(False)
# Update number of steps
self.optimizer.step()
self.scheduler.step()
data_time_avg.update(data_time)
iter_time_avg.update(iter_timer.toc(False))
fw_time_avg.update(fw_timer.diff)
bw_time_avg.update(bw_timer.diff)
ddp_time_avg.update(ddp_timer.diff)
losses['total_loss'].update(batch_losses['total_loss'], target.size(0))
losses['semantic_loss'].update(batch_losses['semantic_loss'], target.size(0))
scores.update(batch_score, target.size(0))
if self.curr_iter >= self.config.optimizer.max_iter:
self.is_training = False
break
if self.curr_iter % self.config.train.stat_freq == 0 or self.curr_iter == 1:
lrs = ', '.join(['{:.3e}'.format(x) for x in self.scheduler.get_last_lr()])
debug_str = "===> Epoch[{}]({}/{}): Loss {:.4f}, Sem {:.4f} \tLR: {}\t".format(
self.epoch, self.curr_iter, len(self.train_data_loader) // self.config.optimizer.iter_size,
losses['total_loss'].avg, losses['semantic_loss'].avg, lrs)
debug_str += "Score {:.3f}\tData time: {:.4f}, Forward time: {:.4f}, Backward time: {:.4f}, DDP time: {:.4f}, Total iter time: {:.4f}".format(
scores.avg, data_time_avg.avg, fw_time_avg.avg, bw_time_avg.avg, ddp_time_avg.avg, iter_time_avg.avg)
logging.info(debug_str)
# Reset timers
data_time_avg.reset()
iter_time_avg.reset()
# Write logs
if self.is_master:
self.writer.add_scalar('train/loss', losses['total_loss'].avg, self.curr_iter)
self.writer.add_scalar('train/semantic_loss', losses['semantic_loss'].avg, self.curr_iter)
self.writer.add_scalar('train/precision_at_1', scores.avg, self.curr_iter)
self.writer.add_scalar('train/learning_rate', self.scheduler.get_last_lr()[0], self.curr_iter)
# clear loss
losses['total_loss'].reset()
losses['semantic_loss'].reset()
scores.reset()
# Validation
if self.curr_iter % self.config.train.val_freq == 0 and self.is_master:
self.validate()
self.model.train()
if self.curr_iter % self.config.train.empty_cache_freq == 0:
# Clear cache
torch.cuda.empty_cache()
# End of iteration
self.curr_iter += 1
self.epoch += 1
# Explicit memory cleanup
if hasattr(data_iter, 'cleanup'):
data_iter.cleanup()
# Save the final model
if self.is_master:
self.validate()
| ContrastiveSceneContexts-main | downstream/semseg/lib/ddp_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144),
]
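# Illustrative sketch (added example, not part of the original API): mapping an array of nyu40
# label ids to RGB colors with the palette above, e.g. before writing a colored mesh. The helper
# name `_example_colorize_labels` is an assumption chosen for demonstration.
def _example_colorize_labels(label_ids):
  # Clip ids into the palette range and index the palette row-wise.
  palette = np.asarray(create_color_palette(), dtype=np.uint8)
  label_ids = np.clip(np.asarray(label_ids, dtype=np.int64), 0, len(palette) - 1)
  return palette[label_ids]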
def write_triangle_mesh(vertices, colors, faces, outputFile):
mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False)
mesh.export(outputFile)
def read_triangle_mesh(filename):
mesh = trimesh.load_mesh(filename, process=False)
if isinstance(mesh, trimesh.PointCloud):
vertices = mesh.vertices
colors = mesh.colors
faces = None
elif isinstance(mesh, trimesh.Trimesh):
vertices = mesh.vertices
colors = mesh.visual.vertex_colors
faces = mesh.faces
return vertices, colors, faces
| ContrastiveSceneContexts-main | downstream/semseg/lib/io3d.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import os
import time
import torch
import signal
import pickle
import threading
import functools
import traceback
import torch.nn as nn
import torch.distributed as dist
import multiprocessing as mp
"""Multiprocessing error handler."""
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, sig_num, stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
"""Multiprocessing helpers."""
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
"""Runs a function from a child process."""
try:
# Initialize the process group
init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
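# Illustrative sketch (added example): how a function might be launched across GPUs with
# multi_proc_run. `_example_worker` and its arguments are assumptions for demonstration; the real
# entry points in this repo pass their training function instead.
def _example_worker(message):
  # init_process_group has already run inside run(), so the default group is available here.
  print('rank {}: {}'.format(dist.get_rank(), message))
def _example_multi_proc_launch(num_gpus=2):
  multi_proc_run(num_gpus, fun=_example_worker, fun_args=('hello',))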
"""Distributed helpers."""
def is_master_proc(num_gpus):
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
When training using a single GPU, there is only one training processes
which is considered the master processes.
"""
return num_gpus == 1 or torch.distributed.get_rank() == 0
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather_differentiable(tensor):
"""
Run differentiable gather function for SparseConv features with variable number of points.
tensor: [num_points, feature_dim]
"""
world_size = get_world_size()
if world_size == 1:
return [tensor]
num_points, f_dim = tensor.size()
local_np = torch.LongTensor([num_points]).to("cuda")
np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(np_list, local_np)
  np_list = [int(sz.item()) for sz in np_list]
max_np = max(np_list)
tensor_list = []
for _ in np_list:
tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda"))
if local_np != max_np:
padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float()
tensor = torch.cat((tensor, padding), dim=0)
assert tensor.size() == (max_np, f_dim)
dist.all_gather(tensor_list, tensor)
data_list = []
for gather_np, gather_tensor in zip(np_list, tensor_list):
gather_tensor = gather_tensor[:gather_np]
assert gather_tensor.size() == (gather_np, f_dim)
data_list.append(gather_tensor)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
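# Illustrative sketch (added example): averaging a per-rank scalar with all_gather, the same
# pattern the trainer uses for its logging_output dict. The key name 'metric' is an assumption.
def _example_average_metric(local_value):
  gathered = all_gather({'metric': local_value})
  return sum(d['metric'] for d in gathered) / len(gathered)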
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://{}:{}".format("localhost", "10001"),
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
| ContrastiveSceneContexts-main | downstream/semseg/lib/distributed.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from numpy.linalg import matrix_rank, inv
from plyfile import PlyData, PlyElement
import pandas as pd
COLOR_MAP_RGB = (
(241, 255, 82),
(102, 168, 226),
(0, 255, 0),
(113, 143, 65),
(89, 173, 163),
(254, 158, 137),
(190, 123, 75),
(100, 22, 116),
(0, 18, 141),
(84, 84, 84),
(85, 116, 127),
(255, 31, 33),
(228, 228, 228),
(0, 255, 0),
(70, 145, 150),
(237, 239, 94),
)
IGNORE_COLOR = (0, 0, 0)
def read_plyfile(filepath):
"""Read ply file and return it as numpy array. Returns None if emtpy."""
with open(filepath, 'rb') as f:
plydata = PlyData.read(f)
if plydata.elements:
return pd.DataFrame(plydata.elements[0].data).values
def save_point_cloud(points_3d, filename, binary=True, with_label=False, verbose=True):
"""Save an RGB point cloud as a PLY file.
Args:
    points_3d: Nx6 matrix where points_3d[:, :3] are the XYZ coordinates and points_3d[:, 3:] are
the RGB values. If Nx3 matrix, save all points with [128, 128, 128] (gray) color.
"""
assert points_3d.ndim == 2
if with_label:
assert points_3d.shape[1] == 7
python_types = (float, float, float, int, int, int, int)
npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
('blue', 'u1'), ('label', 'u1')]
else:
if points_3d.shape[1] == 3:
gray_concat = np.tile(np.array([128], dtype=np.uint8), (points_3d.shape[0], 3))
points_3d = np.hstack((points_3d, gray_concat))
assert points_3d.shape[1] == 6
python_types = (float, float, float, int, int, int)
npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
('blue', 'u1')]
if binary is True:
# Format into NumPy structured array
vertices = []
for row_idx in range(points_3d.shape[0]):
cur_point = points_3d[row_idx]
vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point)))
vertices_array = np.array(vertices, dtype=npy_types)
el = PlyElement.describe(vertices_array, 'vertex')
# Write
PlyData([el]).write(filename)
else:
# PlyData([el], text=True).write(filename)
with open(filename, 'w') as f:
f.write('ply\n'
'format ascii 1.0\n'
'element vertex %d\n'
'property float x\n'
'property float y\n'
'property float z\n'
'property uchar red\n'
'property uchar green\n'
'property uchar blue\n'
'property uchar alpha\n'
'end_header\n' % points_3d.shape[0])
for row_idx in range(points_3d.shape[0]):
X, Y, Z, R, G, B = points_3d[row_idx]
f.write('%f %f %f %d %d %d 0\n' % (X, Y, Z, R, G, B))
if verbose is True:
print('Saved point cloud to: %s' % filename)
class Camera(object):
def __init__(self, intrinsics):
self._intrinsics = intrinsics
self._camera_matrix = self.build_camera_matrix(self.intrinsics)
self._K_inv = inv(self.camera_matrix)
@staticmethod
def build_camera_matrix(intrinsics):
"""Build the 3x3 camera matrix K using the given intrinsics.
Equation 6.10 from HZ.
"""
f = intrinsics['focal_length']
pp_x = intrinsics['pp_x']
pp_y = intrinsics['pp_y']
K = np.array([[f, 0, pp_x], [0, f, pp_y], [0, 0, 1]], dtype=np.float32)
# K[:, 0] *= -1. # Step 1 of Kyle
assert matrix_rank(K) == 3
return K
@staticmethod
def extrinsics2RT(extrinsics):
"""Convert extrinsics matrix to separate rotation matrix R and translation vector T.
"""
assert extrinsics.shape == (4, 4)
R = extrinsics[:3, :3]
T = extrinsics[3, :3]
R = np.copy(R)
T = np.copy(T)
T = T.reshape(3, 1)
R[0, :] *= -1. # Step 1 of Kyle
T *= 100. # Convert from m to cm
return R, T
def project(self, points_3d, extrinsics=None):
"""Project a 3D point in camera coordinates into the camera/image plane.
Args:
point_3d:
"""
if extrinsics is not None: # Map points to camera coordinates
points_3d = self.world2camera(extrinsics, points_3d)
# TODO: Make sure to handle homogeneous AND non-homogeneous coordinate points
# TODO: Consider handling a set of points
raise NotImplementedError
def backproject(self,
depth_map,
labels=None,
max_depth=None,
max_height=None,
min_height=None,
rgb_img=None,
extrinsics=None,
prune=True):
"""Backproject a depth map into 3D points (camera coordinate system). Attach color if RGB image
is provided, otherwise use gray [128 128 128] color.
Does not show points at Z = 0 or maximum Z = 65535 depth.
Args:
labels: Tensor with the same shape as depth map (but can be 1-channel or 3-channel).
max_depth: Maximum depth in cm. All pts with depth greater than max_depth will be ignored.
max_height: Maximum height in cm. All pts with height greater than max_height will be ignored.
Returns:
points_3d: Numpy array of size Nx3 (XYZ) or Nx6 (XYZRGB).
"""
if labels is not None:
assert depth_map.shape[:2] == labels.shape[:2]
if (labels.ndim == 2) or ((labels.ndim == 3) and (labels.shape[2] == 1)):
n_label_channels = 1
elif (labels.ndim == 3) and (labels.shape[2] == 3):
n_label_channels = 3
if rgb_img is not None:
assert depth_map.shape[:2] == rgb_img.shape[:2]
else:
      rgb_img = np.ones(depth_map.shape[:2] + (3,), dtype=np.uint8) * 128  # default to gray, matching the docstring
# Convert from 1-channel to 3-channel
if (rgb_img.ndim == 3) and (rgb_img.shape[2] == 1):
rgb_img = np.tile(rgb_img, [1, 1, 3])
# Convert depth map to single channel if it is multichannel
if (depth_map.ndim == 3) and depth_map.shape[2] == 3:
depth_map = np.squeeze(depth_map[:, :, 0])
depth_map = depth_map.astype(np.float32)
# Get image dimensions
H, W = depth_map.shape
# Create meshgrid (pixel coordinates)
Z = depth_map
A, B = np.meshgrid(range(W), range(H))
ones = np.ones_like(A)
grid = np.concatenate((A[:, :, np.newaxis], B[:, :, np.newaxis], ones[:, :, np.newaxis]),
axis=2)
grid = grid.astype(np.float32) * Z[:, :, np.newaxis]
# Nx3 where each row is (a*Z, b*Z, Z)
grid_flattened = grid.reshape((-1, 3))
grid_flattened = grid_flattened.T # 3xN where each col is (a*Z, b*Z, Z)
prod = np.dot(self.K_inv, grid_flattened)
XYZ = np.concatenate((prod[:2, :].T, Z.flatten()[:, np.newaxis]), axis=1) # Nx3
XYZRGB = np.hstack((XYZ, rgb_img.reshape((-1, 3))))
points_3d = XYZRGB
if labels is not None:
labels_reshaped = labels.reshape((-1, n_label_channels))
# Prune points
if prune is True:
valid = []
for idx in range(points_3d.shape[0]):
cur_y = points_3d[idx, 1]
cur_z = points_3d[idx, 2]
if (cur_z == 0) or (cur_z == 65535): # Don't show things at 0 distance or max distance
continue
elif (max_depth is not None) and (cur_z > max_depth):
continue
elif (max_height is not None) and (cur_y > max_height):
continue
elif (min_height is not None) and (cur_y < min_height):
continue
else:
valid.append(idx)
points_3d = points_3d[np.asarray(valid)]
if labels is not None:
labels_reshaped = labels_reshaped[np.asarray(valid)]
if extrinsics is not None:
points_3d = self.camera2world(extrinsics, points_3d)
if labels is not None:
points_3d_labels = np.hstack((points_3d[:, :3], labels_reshaped))
return points_3d, points_3d_labels
else:
return points_3d
@staticmethod
def _camera2world_transform(no_rgb_points_3d, R, T):
points_3d_world = (np.dot(R.T, no_rgb_points_3d.T) - T).T # Nx3
return points_3d_world
@staticmethod
def _world2camera_transform(no_rgb_points_3d, R, T):
points_3d_world = (np.dot(R, no_rgb_points_3d.T + T)).T # Nx3
return points_3d_world
def _transform_points(self, points_3d, extrinsics, transform):
"""Base/wrapper method for transforming points using R and T.
"""
assert points_3d.ndim == 2
orig_points_3d = points_3d
points_3d = np.copy(orig_points_3d)
if points_3d.shape[1] == 6: # XYZRGB
points_3d = points_3d[:, :3]
elif points_3d.shape[1] == 3: # XYZ
points_3d = points_3d
else:
raise ValueError('3D points need to be XYZ or XYZRGB.')
R, T = self.extrinsics2RT(extrinsics)
points_3d_world = transform(points_3d, R, T)
# Add color again (if appropriate)
if orig_points_3d.shape[1] == 6: # XYZRGB
points_3d_world = np.hstack((points_3d_world, orig_points_3d[:, -3:]))
return points_3d_world
def camera2world(self, extrinsics, points_3d):
"""Transform from camera coordinates (3D) to world coordinates (3D).
Args:
points_3d: Nx3 or Nx6 matrix of N points with XYZ or XYZRGB values.
"""
return self._transform_points(points_3d, extrinsics, self._camera2world_transform)
def world2camera(self, extrinsics, points_3d):
"""Transform from world coordinates (3D) to camera coordinates (3D).
"""
return self._transform_points(points_3d, extrinsics, self._world2camera_transform)
@property
def intrinsics(self):
return self._intrinsics
@property
def camera_matrix(self):
return self._camera_matrix
@property
def K_inv(self):
return self._K_inv
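# Illustrative sketch (added example): backprojecting a small depth map with the Camera class
# above. The intrinsics and image sizes are arbitrary demonstration values, not taken from any
# dataset in this repo.
def _example_backproject():
  camera = Camera({'focal_length': 500.0, 'pp_x': 2.0, 'pp_y': 2.0})
  depth_map = np.full((4, 4), 100.0, dtype=np.float32)  # depth in cm
  rgb_img = np.zeros((4, 4, 3), dtype=np.uint8)
  # Returns an Nx6 array of XYZRGB points in camera coordinates.
  return camera.backproject(depth_map, rgb_img=rgb_img)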
def colorize_pointcloud(xyz, label, ignore_label=255):
assert label[label != ignore_label].max() < len(COLOR_MAP_RGB), 'Not enough colors.'
label_rgb = np.array([COLOR_MAP_RGB[i] if i != ignore_label else IGNORE_COLOR for i in label])
return np.hstack((xyz, label_rgb))
class PlyWriter(object):
POINTCLOUD_DTYPE = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
('blue', 'u1')]
@classmethod
def read_txt(cls, txtfile):
# Read txt file and parse its content.
with open(txtfile) as f:
pointcloud = [l.split() for l in f]
# Load point cloud to named numpy array.
pointcloud = np.array(pointcloud).astype(np.float32)
assert pointcloud.shape[1] == 6
xyz = pointcloud[:, :3].astype(np.float32)
rgb = pointcloud[:, 3:].astype(np.uint8)
return xyz, rgb
@staticmethod
def write_ply(array, filepath):
ply_el = PlyElement.describe(array, 'vertex')
target_path, _ = os.path.split(filepath)
if target_path != '' and not os.path.exists(target_path):
os.makedirs(target_path)
PlyData([ply_el]).write(filepath)
@classmethod
def write_vertex_only_ply(cls, vertices, filepath):
# assume that points are N x 3 np array for vertex locations
color = 255 * np.ones((len(vertices), 3))
pc_points = np.array([tuple(p) for p in np.concatenate((vertices, color), axis=1)],
dtype=cls.POINTCLOUD_DTYPE)
cls.write_ply(pc_points, filepath)
@classmethod
def write_ply_vert_color(cls, vertices, colors, filepath):
# assume that points are N x 3 np array for vertex locations
pc_points = np.array([tuple(p) for p in np.concatenate((vertices, colors), axis=1)],
dtype=cls.POINTCLOUD_DTYPE)
cls.write_ply(pc_points, filepath)
@classmethod
def concat_label(cls, target, xyz, label):
subpointcloud = np.concatenate([xyz, label], axis=1)
subpointcloud = np.array([tuple(l) for l in subpointcloud], dtype=cls.POINTCLOUD_DTYPE)
return np.concatenate([target, subpointcloud], axis=0)
| ContrastiveSceneContexts-main | downstream/semseg/lib/pc_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import errno
import time
import torch
import numpy as np
from omegaconf import OmegaConf
from lib.pc_utils import colorize_pointcloud, save_point_cloud
from lib.distributed import get_world_size
def load_state_with_same_shape(model, weights):
# weights['conv1.kernel'] = weights['conv1.kernel'].repeat([1,3,1])/3.0
model_state = model.state_dict()
if list(weights.keys())[0].startswith('module.'):
logging.info("Loading multigpu weights with module. prefix...")
weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith('encoder.'):
logging.info("Loading multigpu weights with encoder. prefix...")
weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()}
filtered_weights = {
k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()
}
logging.info("Loading weights:" + ', '.join(filtered_weights.keys()))
return filtered_weights
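# Illustrative sketch (added example): the usual pattern for partially initializing a model from a
# pretrained checkpoint via load_state_with_same_shape, mirroring the weight loading done in the
# trainer. The checkpoint is assumed to store its weights under the 'state_dict' key.
def _example_load_pretrained(model, weights_path):
  state = torch.load(weights_path, map_location='cpu')
  matched = load_state_with_same_shape(model, state['state_dict'])
  model_state = model.state_dict()
  model_state.update(matched)
  model.load_state_dict(model_state)
  return model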
def checkpoint(model, optimizer, epoch, iteration, config, best_val_miou=None, postfix=None):
mkdir_p('weights')
filename = f"checkpoint_{config.net.model}_iter{iteration}.pth"
if config.train.overwrite_weights:
filename = f"checkpoint_{config.net.model}.pth"
if postfix is not None:
filename = f"checkpoint_{config.net.model}_{postfix}.pth"
checkpoint_file = 'weights/' + filename
_model = model.module if get_world_size() > 1 else model
state = {
'iteration': iteration,
'epoch': epoch,
'arch': config.net.model,
'state_dict': _model.state_dict(),
'optimizer': optimizer.state_dict()
}
if best_val_miou is not None:
state['best_val_miou'] = best_val_miou
state['best_val_iter'] = iteration
# save config
OmegaConf.save(config, 'config.yaml')
torch.save(state, checkpoint_file)
logging.info(f"Checkpoint saved to {checkpoint_file}")
  if postfix is None:
# Delete symlink if it exists
if os.path.exists('weights/weights.pth'):
os.remove('weights/weights.pth')
# Create symlink
os.system('ln -s {} weights/weights.pth'.format(filename))
def precision_at_one(pred, target, ignore_label=255):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != ignore_label]
correct = correct.view(-1)
if correct.nelement():
return correct.float().sum(0).mul(100.0 / correct.size(0)).item()
else:
return float('nan')
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(n * label[k].astype(int) + pred[k], minlength=n**2).reshape(n, n)
def per_class_iu(hist):
with np.errstate(divide='ignore', invalid='ignore'):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
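# Illustrative sketch (added example): computing mean IoU from flat prediction/label arrays with
# fast_hist and per_class_iu. Predictions are assumed to contain class ids in [0, num_classes);
# entries equal to ignore_label are excluded, matching how labels are handled elsewhere here.
def _example_mean_iou(pred, label, num_classes, ignore_label=255):
  valid = label != ignore_label
  hist = fast_hist(pred[valid], label[valid], num_classes)
  return np.nanmean(per_class_iu(hist)) * 100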
class WithTimer(object):
"""Timer for with statement."""
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
out_str = 'Elapsed: %s' % (time.time() - self.tstart)
if self.name:
      logging.info(f'[{self.name}]')
logging.info(out_str)
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
    self.average_time = 0
def tic(self):
# using time.time instead of time.clock because time time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
class ExpTimer(Timer):
""" Exponential Moving Average Timer """
def __init__(self, alpha=0.5):
super(ExpTimer, self).__init__()
self.alpha = alpha
def toc(self):
self.diff = time.time() - self.start_time
self.average_time = self.alpha * self.diff + \
(1 - self.alpha) * self.average_time
return self.average_time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def read_txt(path):
"""Read txt file into lines.
"""
with open(path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
return lines
def debug_on():
import sys
import pdb
import functools
import traceback
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
info = sys.exc_info()
traceback.print_exception(*info)
pdb.post_mortem(info[2])
return wrapper
return decorator
def get_prediction(dataset, output, target):
return output.max(1)[1]
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_torch_device(is_cuda):
return torch.device('cuda' if is_cuda else 'cpu')
class HashTimeBatch(object):
def __init__(self, prime=5279):
self.prime = prime
def __call__(self, time, batch):
return self.hash(time, batch)
def hash(self, time, batch):
return self.prime * batch + time
def dehash(self, key):
time = key % self.prime
    batch = key // self.prime
return time, batch
def save_rotation_pred(iteration, pred, dataset, save_pred_dir):
"""Save prediction results in original pointcloud scale."""
decode_label_map = {}
for k, v in dataset.label_map.items():
decode_label_map[v] = k
  pred = np.array([decode_label_map[x] for x in pred], dtype=int)
out_rotation_txt = dataset.get_output_id(iteration) + '.txt'
out_rotation_path = save_pred_dir + '/' + out_rotation_txt
np.savetxt(out_rotation_path, pred, fmt='%i')
def visualize_results(coords, input, target, upsampled_pred, config, iteration):
# Get filter for valid predictions in the first batch.
target_batch = coords[:, 3].numpy() == 0
input_xyz = coords[:, :3].numpy()
target_valid = target.numpy() != 255
target_pred = np.logical_and(target_batch, target_valid)
target_nonpred = np.logical_and(target_batch, ~target_valid)
ptc_nonpred = np.hstack((input_xyz[target_nonpred], np.zeros((np.sum(target_nonpred), 3))))
# Unwrap file index if tested with rotation.
file_iter = iteration
  if config.test.test_rotation >= 1:
    file_iter = iteration // config.test.test_rotation
# Create directory to save visualization results.
os.makedirs(config.test.visualize_path, exist_ok=True)
# Label visualization in RGB.
xyzlabel = colorize_pointcloud(input_xyz[target_pred], upsampled_pred[target_pred])
xyzlabel = np.vstack((xyzlabel, ptc_nonpred))
filename = '_'.join([config.dataset, config.model, 'pred', '%04d.ply' % file_iter])
save_point_cloud(xyzlabel, os.path.join(config.test.visualize_path, filename), verbose=False)
# RGB input values visualization.
xyzrgb = np.hstack((input_xyz[target_batch], input[:, :3].cpu().numpy()[target_batch]))
filename = '_'.join([config.dataset, config.model, 'rgb', '%04d.ply' % file_iter])
save_point_cloud(xyzrgb, os.path.join(config.test.visualize_path, filename), verbose=False)
# Ground-truth visualization in RGB.
xyzgt = colorize_pointcloud(input_xyz[target_pred], target.numpy()[target_pred])
xyzgt = np.vstack((xyzgt, ptc_nonpred))
filename = '_'.join([config.dataset, config.model, 'gt', '%04d.ply' % file_iter])
save_point_cloud(xyzgt, os.path.join(config.test.visualize_path, filename), verbose=False)
def permute_pointcloud(input_coords, pointcloud, transformation, label_map,
voxel_output, voxel_pred):
"""Get permutation from pointcloud to input voxel coords."""
def _hash_coords(coords, coords_min, coords_dim):
return np.ravel_multi_index((coords - coords_min).T, coords_dim)
# Validate input.
input_batch_size = input_coords[:, -1].max().item()
pointcloud_batch_size = pointcloud[:, -1].max().int().item()
transformation_batch_size = transformation[:, -1].max().int().item()
assert input_batch_size == pointcloud_batch_size == transformation_batch_size
pointcloud_permutation, pointcloud_target = [], []
# Process each batch.
for i in range(input_batch_size + 1):
# Filter batch from the data.
input_coords_mask_b = input_coords[:, -1] == i
input_coords_b = (input_coords[input_coords_mask_b])[:, :-1].numpy()
pointcloud_b = pointcloud[pointcloud[:, -1] == i, :-1].numpy()
transformation_b = transformation[i, :-1].reshape(4, 4).numpy()
# Transform original pointcloud to voxel space.
original_coords1 = np.hstack((pointcloud_b[:, :3], np.ones((pointcloud_b.shape[0], 1))))
original_vcoords = np.floor(original_coords1 @ transformation_b.T)[:, :3].astype(int)
# Hash input and voxel coordinates to flat coordinate.
vcoords_all = np.vstack((input_coords_b, original_vcoords))
vcoords_min = vcoords_all.min(0)
vcoords_dims = vcoords_all.max(0) - vcoords_all.min(0) + 1
input_coords_key = _hash_coords(input_coords_b, vcoords_min, vcoords_dims)
original_vcoords_key = _hash_coords(original_vcoords, vcoords_min, vcoords_dims)
# Query voxel predictions from original pointcloud.
key_to_idx = dict(zip(input_coords_key, range(len(input_coords_key))))
pointcloud_permutation.append(
np.array([key_to_idx.get(i, -1) for i in original_vcoords_key]))
pointcloud_target.append(pointcloud_b[:, -1].astype(int))
pointcloud_permutation = np.concatenate(pointcloud_permutation)
# Prepare pointcloud permutation array.
pointcloud_permutation = torch.from_numpy(pointcloud_permutation)
permutation_mask = pointcloud_permutation >= 0
permutation_valid = pointcloud_permutation[permutation_mask]
# Permuate voxel output to pointcloud.
pointcloud_output = torch.zeros(pointcloud.shape[0], voxel_output.shape[1]).to(voxel_output)
pointcloud_output[permutation_mask] = voxel_output[permutation_valid]
# Permuate voxel prediction to pointcloud.
# NOTE: Invalid points (points found in pointcloud but not in the voxel) are mapped to 0.
pointcloud_pred = torch.ones(pointcloud.shape[0]).int().to(voxel_pred) * 0
pointcloud_pred[permutation_mask] = voxel_pred[permutation_valid]
# Map pointcloud target to respect dataset IGNORE_LABELS
pointcloud_target = torch.from_numpy(
np.array([label_map[i] for i in np.concatenate(pointcloud_target)])).int()
return pointcloud_output, pointcloud_pred, pointcloud_target
| ContrastiveSceneContexts-main | downstream/semseg/lib/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from scipy.sparse import csr_matrix
import torch
class SparseMM(torch.autograd.Function):
"""
Sparse x dense matrix multiplication with autograd support.
Implementation by Soumith Chintala:
https://discuss.pytorch.org/t/
does-pytorch-support-autograd-on-sparse-matrix/6156/7
"""
def forward(self, matrix1, matrix2):
self.save_for_backward(matrix1, matrix2)
return torch.mm(matrix1, matrix2)
def backward(self, grad_output):
matrix1, matrix2 = self.saved_tensors
grad_matrix1 = grad_matrix2 = None
if self.needs_input_grad[0]:
grad_matrix1 = torch.mm(grad_output, matrix2.t())
if self.needs_input_grad[1]:
grad_matrix2 = torch.mm(matrix1.t(), grad_output)
return grad_matrix1, grad_matrix2
def sparse_float_tensor(values, indices, size=None):
"""
Return a torch sparse matrix give values and indices (row_ind, col_ind).
If the size is an integer, return a square matrix with side size.
If the size is a torch.Size, use it to initialize the out tensor.
If none, the size is inferred.
"""
  indices = torch.stack(indices).long()  # sparse tensor constructors expect LongTensor indices
sargs = [indices, values.float()]
if size is not None:
# Use the provided size
if isinstance(size, int):
size = torch.Size((size, size))
sargs.append(size)
if values.is_cuda:
return torch.cuda.sparse.FloatTensor(*sargs)
else:
return torch.sparse.FloatTensor(*sargs)
def diags(values, size=None):
values = values.view(-1)
n = values.nelement()
size = torch.Size((n, n))
indices = (torch.arange(0, n), torch.arange(0, n))
return sparse_float_tensor(values, indices, size)
def sparse_to_csr_matrix(tensor):
tensor = tensor.cpu()
inds = tensor._indices().numpy()
vals = tensor._values().numpy()
return csr_matrix((vals, (inds[0], inds[1])), shape=[s for s in tensor.shape])
def csr_matrix_to_sparse(mat):
row_ind, col_ind = mat.nonzero()
return sparse_float_tensor(
torch.from_numpy(mat.data),
(torch.from_numpy(row_ind), torch.from_numpy(col_ind)),
size=torch.Size(mat.shape))
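# Illustrative sketch (added example): converting a torch sparse tensor to a scipy CSR matrix with
# the helper above. The tensor is built directly with torch.sparse_coo_tensor and coalesced so
# that _indices()/_values() are well defined; the values are arbitrary demonstration data.
def _example_torch_to_csr():
  indices = torch.tensor([[0, 1], [1, 0]])
  values = torch.tensor([1.0, 2.0])
  sparse_tensor = torch.sparse_coo_tensor(indices, values, (2, 2)).coalesce()
  return sparse_to_csr_matrix(sparse_tensor)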
| ContrastiveSceneContexts-main | downstream/semseg/lib/math_functions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from MinkowskiEngine import MinkowskiGlobalPooling, MinkowskiBroadcastAddition, MinkowskiBroadcastMultiplication
class MinkowskiLayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, D=-1):
super(MinkowskiLayerNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.weight = nn.Parameter(torch.ones(1, num_features))
self.bias = nn.Parameter(torch.zeros(1, num_features))
self.mean_in = MinkowskiGlobalPooling(dimension=D)
self.glob_sum = MinkowskiBroadcastAddition(dimension=D)
self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D)
self.glob_mean = MinkowskiGlobalPooling(dimension=D)
self.glob_times = MinkowskiBroadcastMultiplication(dimension=D)
self.D = D
self.reset_parameters()
def __repr__(self):
s = f'(D={self.D})'
return self.__class__.__name__ + s
def reset_parameters(self):
self.weight.data.fill_(1)
self.bias.data.zero_()
def _check_input_dim(self, input):
if input.F.dim() != 2:
      raise ValueError('expected 2D input (got {}D input)'.format(input.F.dim()))
def forward(self, x):
self._check_input_dim(x)
mean = self.mean_in(x).F.mean(-1, keepdim=True)
mean = mean + torch.zeros(mean.size(0), self.num_features).type_as(mean)
temp = self.glob_sum(x.F, -mean)**2
var = self.glob_mean(temp.data).mean(-1, keepdim=True)
var = var + torch.zeros(var.size(0), self.num_features).type_as(var)
instd = 1 / (var + self.eps).sqrt()
x = self.glob_times(self.glob_sum2(x, -mean), instd)
return x * self.weight + self.bias
class MinkowskiInstanceNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, D=-1):
super(MinkowskiInstanceNorm, self).__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(1, num_features))
self.bias = nn.Parameter(torch.zeros(1, num_features))
self.mean_in = MinkowskiGlobalPooling(dimension=D)
self.glob_sum = MinkowskiBroadcastAddition(dimension=D)
self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D)
self.glob_mean = MinkowskiGlobalPooling(dimension=D)
self.glob_times = MinkowskiBroadcastMultiplication(dimension=D)
self.D = D
self.reset_parameters()
def __repr__(self):
    s = f'(D={self.D})'
return self.__class__.__name__ + s
def reset_parameters(self):
self.weight.data.fill_(1)
self.bias.data.zero_()
def _check_input_dim(self, input):
    if input.F.dim() != 2:
      raise ValueError('expected 2D input (got {}D input)'.format(input.F.dim()))
def forward(self, x):
self._check_input_dim(x)
mean_in = self.mean_in(x)
temp = self.glob_sum(x, -mean_in)**2
var_in = self.glob_mean(temp.data)
instd_in = 1 / (var_in + self.eps).sqrt()
x = self.glob_times(self.glob_sum2(x, -mean_in), instd_in)
return x * self.weight + self.bias
| ContrastiveSceneContexts-main | downstream/semseg/lib/layers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import hydra
import torch
import numpy as np
from lib.ddp_trainer import SegmentationTrainer
from lib.distributed import multi_proc_run
def single_proc_run(config):
if not torch.cuda.is_available():
raise Exception('No GPUs FOUND.')
trainer = SegmentationTrainer(config)
if config.train.is_train:
trainer.train()
else:
trainer.test()
@hydra.main(config_path='config', config_name='default.yaml')
def main(config):
# fix seed
np.random.seed(config.misc.seed)
torch.manual_seed(config.misc.seed)
torch.cuda.manual_seed(config.misc.seed)
# Convert to dict
if config.misc.num_gpus > 1:
multi_proc_run(config.misc.num_gpus, fun=single_proc_run, fun_args=(config,))
else:
single_proc_run(config)
if __name__ == '__main__':
__spec__ = None
os.environ['MKL_THREADING_LAYER'] = 'GNU'
main()
| ContrastiveSceneContexts-main | downstream/insseg/ddp_main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import logging
import numpy as np
import scipy
import scipy.ndimage
import scipy.interpolate
import torch
# A sparse tensor consists of coordinates and associated features.
# You must apply augmentation to both.
# In 2D, flip, shear, scale, and rotation of images are coordinate transformation
# color jitter, hue, etc., are feature transformations
##############################
# Feature transformations
##############################
class ChromaticTranslation(object):
"""Add random color to the image, input must be an array in [0,255] or a PIL image"""
def __init__(self, trans_range_ratio=1e-1):
"""
trans_range_ratio: ratio of translation i.e. 255 * 2 * ratio * rand(-0.5, 0.5)
"""
self.trans_range_ratio = trans_range_ratio
def __call__(self, coords, feats, labels, instances):
if random.random() < 0.95:
tr = (np.random.rand(1, 3) - 0.5) * 255 * 2 * self.trans_range_ratio
feats[:, :3] = np.clip(tr + feats[:, :3], 0, 255)
return coords, feats, labels, instances
class ChromaticAutoContrast(object):
def __init__(self, randomize_blend_factor=True, blend_factor=0.5):
self.randomize_blend_factor = randomize_blend_factor
self.blend_factor = blend_factor
def __call__(self, coords, feats, labels, instances):
if random.random() < 0.2:
# mean = np.mean(feats, 0, keepdims=True)
# std = np.std(feats, 0, keepdims=True)
# lo = mean - std
# hi = mean + std
lo = feats[:, :3].min(0, keepdims=True)
hi = feats[:, :3].max(0, keepdims=True)
assert hi.max() > 1, f"invalid color value. Color is supposed to be [0-255]"
scale = 255 / (hi - lo)
contrast_feats = (feats[:, :3] - lo) * scale
blend_factor = random.random() if self.randomize_blend_factor else self.blend_factor
      feats[:, :3] = (1 - blend_factor) * feats[:, :3] + blend_factor * contrast_feats
return coords, feats, labels, instances
class ChromaticJitter(object):
def __init__(self, std=0.01):
self.std = std
def __call__(self, coords, feats, labels, instances):
if random.random() < 0.95:
noise = np.random.randn(feats.shape[0], 3)
noise *= self.std * 255
feats[:, :3] = np.clip(noise + feats[:, :3], 0, 255)
return coords, feats, labels, instances
class HueSaturationTranslation(object):
@staticmethod
def rgb_to_hsv(rgb):
# Translated from source of colorsys.rgb_to_hsv
# r,g,b should be a numpy arrays with values between 0 and 255
# rgb_to_hsv returns an array of floats between 0.0 and 1.0.
rgb = rgb.astype('float')
hsv = np.zeros_like(rgb)
# in case an RGBA array was passed, just copy the A channel
hsv[..., 3:] = rgb[..., 3:]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
maxc = np.max(rgb[..., :3], axis=-1)
minc = np.min(rgb[..., :3], axis=-1)
hsv[..., 2] = maxc
mask = maxc != minc
hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
rc = np.zeros_like(r)
gc = np.zeros_like(g)
bc = np.zeros_like(b)
rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
return hsv
@staticmethod
def hsv_to_rgb(hsv):
# Translated from source of colorsys.hsv_to_rgb
# h,s should be a numpy arrays with values between 0.0 and 1.0
# v should be a numpy array with values between 0.0 and 255.0
# hsv_to_rgb returns an array of uints between 0 and 255.
rgb = np.empty_like(hsv)
rgb[..., 3:] = hsv[..., 3:]
h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
i = (h * 6.0).astype('uint8')
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
return rgb.astype('uint8')
def __init__(self, hue_max, saturation_max):
self.hue_max = hue_max
self.saturation_max = saturation_max
def __call__(self, coords, feats, labels, instances):
# Assume feat[:, :3] is rgb
hsv = HueSaturationTranslation.rgb_to_hsv(feats[:, :3])
hue_val = (random.random() - 0.5) * 2 * self.hue_max
sat_ratio = 1 + (random.random() - 0.5) * 2 * self.saturation_max
hsv[..., 0] = np.remainder(hue_val + hsv[..., 0] + 1, 1)
hsv[..., 1] = np.clip(sat_ratio * hsv[..., 1], 0, 1)
feats[:, :3] = np.clip(HueSaturationTranslation.hsv_to_rgb(hsv), 0, 255)
return coords, feats, labels, instances
##############################
# Coordinate transformations
##############################
class RandomDropout(object):
def __init__(self, dropout_ratio=0.2, dropout_application_ratio=0.5):
"""
upright_axis: axis index among x,y,z, i.e. 2 for z
"""
self.dropout_ratio = dropout_ratio
self.dropout_application_ratio = dropout_application_ratio
def __call__(self, coords, feats, labels, instances):
if random.random() < self.dropout_ratio:
N = len(coords)
inds = np.random.choice(N, int(N * (1 - self.dropout_ratio)), replace=False)
return coords[inds], feats[inds], labels[inds], instances[inds]
return coords, feats, labels, instances
class RandomHorizontalFlip(object):
def __init__(self, upright_axis, is_temporal):
"""
upright_axis: axis index among x,y,z, i.e. 2 for z
"""
self.is_temporal = is_temporal
self.D = 4 if is_temporal else 3
self.upright_axis = {'x': 0, 'y': 1, 'z': 2}[upright_axis.lower()]
# Use the rest of axes for flipping.
self.horz_axes = set(range(self.D)) - set([self.upright_axis])
def __call__(self, coords, feats, labels, instances):
if random.random() < 0.95:
for curr_ax in self.horz_axes:
if random.random() < 0.5:
coord_max = np.max(coords[:, curr_ax])
coords[:, curr_ax] = coord_max - coords[:, curr_ax]
return coords, feats, labels, instances
class ElasticDistortion:
def __init__(self, distortion_params):
self.distortion_params = distortion_params
def elastic_distortion(self, coords, feats, labels, granularity, magnitude):
"""Apply elastic distortion on sparse coordinate space.
pointcloud: numpy array of (number of points, at least 3 spatial dims)
granularity: size of the noise grid (in same scale[m/cm] as the voxel grid)
magnitude: noise multiplier
"""
blurx = np.ones((3, 1, 1, 1)).astype('float32') / 3
blury = np.ones((1, 3, 1, 1)).astype('float32') / 3
blurz = np.ones((1, 1, 3, 1)).astype('float32') / 3
coords_min = coords.min(0)
# Create Gaussian noise tensor of the size given by granularity.
noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3
noise = np.random.randn(*noise_dim, 3).astype(np.float32)
# Smoothing.
for _ in range(2):
      noise = scipy.ndimage.convolve(noise, blurx, mode='constant', cval=0)
      noise = scipy.ndimage.convolve(noise, blury, mode='constant', cval=0)
      noise = scipy.ndimage.convolve(noise, blurz, mode='constant', cval=0)
# Trilinear interpolate noise filters for each spatial dimensions.
ax = [
np.linspace(d_min, d_max, d)
for d_min, d_max, d in zip(coords_min - granularity, coords_min + granularity *
(noise_dim - 2), noise_dim)
]
interp = scipy.interpolate.RegularGridInterpolator(ax, noise, bounds_error=0, fill_value=0)
coords += interp(coords) * magnitude
return coords, feats, labels
def __call__(self, coords, feats, labels):
if self.distortion_params is not None:
if random.random() < 0.95:
for granularity, magnitude in self.distortion_params:
coords, feats, labels = self.elastic_distortion(coords, feats, labels, granularity,
magnitude)
return coords, feats, labels
class Compose(object):
"""Composes several transforms together."""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, *args):
for t in self.transforms:
args = t(*args)
return args
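# Illustrative sketch (added example): composing a feature transform and a coordinate transform and
# applying them to one sample. The random sample data is an assumption for demonstration; in this
# repo the composition is normally assembled by the dataset/dataloader setup code.
def _example_compose_transforms():
  transform = Compose([
      ChromaticJitter(std=0.01),
      RandomHorizontalFlip(upright_axis='z', is_temporal=False),
  ])
  coords = np.random.rand(100, 3).astype(np.float32)
  feats = (np.random.rand(100, 3) * 255).astype(np.float32)
  labels = np.zeros(100, dtype=np.int32)
  instances = np.zeros(100, dtype=np.int32)
  return transform(coords, feats, labels, instances)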
class cfl_collate_fn_factory:
"""Generates collate function for coords, feats, labels.
Args:
limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch
size so that the number of input coordinates is below limit_numpoints.
"""
def __init__(self, limit_numpoints):
self.limit_numpoints = limit_numpoints
def __call__(self, list_data):
coords, feats, labels, instances = list(zip(*list_data))
coords_batch, feats_batch, labels_batch, instances_batch = [], [], [], []
batch_id = 0
batch_num_points = 0
for batch_id, _ in enumerate(coords):
num_points = coords[batch_id].shape[0]
batch_num_points += num_points
if self.limit_numpoints and batch_num_points > self.limit_numpoints:
num_full_points = sum(len(c) for c in coords)
num_full_batch_size = len(coords)
logging.warning(
f'\t\tCannot fit {num_full_points} points into {self.limit_numpoints} points '
f'limit. Truncating batch size at {batch_id} out of {num_full_batch_size} with {batch_num_points - num_points}.'
)
break
# coords_batch.append(
# torch.cat((torch.from_numpy(
# coords[batch_id]).int(), torch.ones(num_points, 1).int() * batch_id), 1))
coords_batch.append(
torch.cat((torch.ones(num_points, 1).int() * batch_id, torch.from_numpy(
coords[batch_id]).int()), 1))
feats_batch.append(torch.from_numpy(feats[batch_id]))
labels_batch.append(torch.from_numpy(labels[batch_id]).int())
instances_batch.append(instances[batch_id])
batch_id += 1
# Concatenate all lists
coords_batch = torch.cat(coords_batch, 0).int()
feats_batch = torch.cat(feats_batch, 0).float()
labels_batch = torch.cat(labels_batch, 0).int()
return coords_batch, feats_batch, labels_batch, instances_batch
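# Illustrative sketch (added example): plugging the collate function above into a PyTorch
# DataLoader. `some_dataset` is an assumed placeholder for any dataset whose __getitem__ returns a
# (coords, feats, labels, instances) tuple; note that the batch index ends up in column 0 of the
# returned coordinates, which is the layout expected by the SparseTensor construction downstream.
def _example_collate_usage(some_dataset):
  from torch.utils.data import DataLoader
  loader = DataLoader(
      some_dataset,
      batch_size=4,
      collate_fn=cfl_collate_fn_factory(limit_numpoints=False))
  coords, feats, labels, instances = next(iter(loader))
  return coords, feats, labels, instances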
class cflt_collate_fn_factory:
"""Generates collate function for coords, feats, labels, point_clouds, transformations.
Args:
limit_numpoints: If 0 or False, does not alter batch size. If positive integer, limits batch
size so that the number of input coordinates is below limit_numpoints.
"""
def __init__(self, limit_numpoints):
self.limit_numpoints = limit_numpoints
def __call__(self, list_data):
coords, feats, labels, instances, transformations = list(zip(*list_data))
cfl_collate_fn = cfl_collate_fn_factory(limit_numpoints=self.limit_numpoints)
coords_batch, feats_batch, labels_batch, instances_batch = cfl_collate_fn(list(zip(coords, feats, labels, instances)))
    num_truncated_batch = coords_batch[:, 0].max().item() + 1  # batch index is stored in the first column
batch_id = 0
transformations_batch = []
for transformation in transformations:
if batch_id >= num_truncated_batch:
break
transformations_batch.append(torch.from_numpy(transformation).float())
batch_id += 1
transformations_batch = torch.stack(transformations_batch, 0)
return coords_batch, feats_batch, labels_batch, instances_batch, transformations_batch
| ContrastiveSceneContexts-main | downstream/insseg/datasets/transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#from lib.datasets import synthia
from datasets import stanford
from datasets import scannet
#from lib.datasets import shapenet
DATASETS = []
def add_datasets(module):
DATASETS.extend([getattr(module, a) for a in dir(module) if 'Dataset' in a])
add_datasets(stanford)
#add_datasets(synthia)
add_datasets(scannet)
#add_datasets(shapenet)
def load_dataset(name):
'''Creates and returns an instance of the datasets given its name.
'''
# Find the model class from its name
mdict = {dataset.__name__: dataset for dataset in DATASETS}
if name not in mdict:
print('Invalid dataset index. Options are:')
# Display a list of valid dataset names
for dataset in DATASETS:
print('\t* {}'.format(dataset.__name__))
raise ValueError(f'Dataset {name} not defined')
DatasetClass = mdict[name]
return DatasetClass
| ContrastiveSceneContexts-main | downstream/insseg/datasets/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
import imageio
import os
import os.path as osp
import pickle
import numpy as np
from collections import defaultdict
from plyfile import PlyData
from lib.pc_utils import Camera, read_plyfile
from lib.dataset import DictDataset, VoxelizationDataset, TemporalVoxelizationDataset, \
str2datasetphase_type, DatasetPhase
from lib.transforms import cfl_collate_fn_factory
from lib.utils import read_txt, debug_on
class SynthiaDataset(DictDataset):
NUM_LABELS = 16
def __init__(self, data_path_file, input_transform=None, target_transform=None):
with open(data_path_file, 'r') as f:
data_paths = pickle.load(f)
super(SynthiaDataset, self).__init__(data_paths, input_transform, target_transform)
@staticmethod
def load_extrinsics(extrinsics_file):
"""Load the camera extrinsics from a .txt file.
"""
lines = read_txt(extrinsics_file)
params = [float(x) for x in lines[0].split(' ')]
extrinsics_matrix = np.asarray(params).reshape([4, 4])
return extrinsics_matrix
@staticmethod
def load_intrinsics(intrinsics_file):
"""Load the camera intrinsics from a intrinsics.txt file.
intrinsics.txt: a text file containing 4 values that represent (in this order) {focal length,
principal-point-x, principal-point-y, baseline (m) with the corresponding right
camera}
"""
lines = read_txt(intrinsics_file)
assert len(lines) == 7
intrinsics = {
'focal_length': float(lines[0]),
'pp_x': float(lines[2]),
'pp_y': float(lines[4]),
'baseline': float(lines[6]),
}
return intrinsics
@staticmethod
def load_depth(depth_file):
"""Read a single depth map (.png) file.
1280x760
760 rows, 1280 columns.
Depth is encoded in any of the 3 channels in centimetres as an ushort.
"""
img = np.asarray(imageio.imread(depth_file, format='PNG-FI')) # uint16
img = img.astype(np.int32) # Convert to int32 for torch compatibility
return img
@staticmethod
def load_label(label_file):
"""Load the ground truth semantic segmentation label.
Annotations are given in two channels. The first channel contains the class of that pixel
(see the table below). The second channel contains the unique ID of the instance for those
objects that are dynamic (cars, pedestrians, etc.).
Class R G B ID
Void 0 0 0 0
Sky 128 128 128 1
Building 128 0 0 2
Road 128 64 128 3
Sidewalk 0 0 192 4
Fence 64 64 128 5
Vegetation 128 128 0 6
Pole 192 192 128 7
Car 64 0 128 8
Traffic Sign 192 128 128 9
Pedestrian 64 64 0 10
Bicycle 0 128 192 11
Lanemarking 0 172 0 12
Reserved - - - 13
Reserved - - - 14
Traffic Light 0 128 128 15
"""
img = np.asarray(imageio.imread(label_file, format='PNG-FI')) # uint16
img = img.astype(np.int32) # Convert to int32 for torch compatibility
return img
@staticmethod
def load_rgb(rgb_file):
"""Load RGB images. 1280x760 RGB images used for training.
760 rows, 1280 columns.
"""
img = np.array(imageio.imread(rgb_file)) # uint8
return img
class SynthiaVoxelizationDataset(VoxelizationDataset):
"""Load the ground truth semantic segmentation label.
Annotations are given in two channels. The first channel contains the class of that pixel
(see the table below). The second channel contains the unique ID of the instance for those
objects that are dynamic (cars, pedestrians, etc.).
Class R G B ID
Void 0 0 0 0
Sky 128 128 128 1
Building 128 0 0 2
Road 128 64 128 3
Sidewalk 0 0 192 4
Fence 64 64 128 5
Vegetation 128 128 0 6
Pole 192 192 128 7
Car 64 0 128 8
Traffic Sign 192 128 128 9
Pedestrian 64 64 0 10
Bicycle 0 128 192 11
Lanemarking 0 172 0 12
Reserved - - - 13
Reserved - - - 14
Traffic Light 0 128 128 15
"""
CLASS_LABELS = ('building', 'road', 'sidewalk', 'fence', 'vegetation', 'pole', 'car',
'sign', 'pedestrian', 'cyclist', 'lanemarking', 'traffic light')
VALID_CLASS_IDS = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15)
# Voxelization arguments
CLIP_BOUND = ((-1800, 1800), (-1800, 1800), (-1800, 1800))
TEST_CLIP_BOUND = ((-2500, 2500), (-2500, 2500), (-2500, 2500))
VOXEL_SIZE = 15 # cm
PREVOXELIZATION_VOXEL_SIZE = 7.5
# Elastic distortion, (granularity, magitude) pairs
# ELASTIC_DISTORT_PARAMS = ((80, 300),)
# Augmentation arguments
ROTATION_AUGMENTATION_BOUND = ((0, 0), (-np.pi, np.pi), (0, 0))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.1, 0.1), (0, 0), (-0.1, 0.1))
ROTATION_AXIS = 'y'
LOCFEAT_IDX = 1
NUM_LABELS = 16 # Automatically subtract ignore labels after processed
IGNORE_LABELS = (0, 1, 13, 14) # void, sky, reserved, reserved
# Split used in the Minkowski ConvNet, CVPR'19
DATA_PATH_FILE = {
DatasetPhase.Train: 'train_cvpr19.txt',
DatasetPhase.Val: 'val_cvpr19.txt',
DatasetPhase.Test: 'test_cvpr19.txt'
}
def __init__(self,
config,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
augment_data=True,
elastic_distortion=False,
cache=False,
phase=DatasetPhase.Train):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:
self.CLIP_BOUND = self.TEST_CLIP_BOUND
data_root = config.data.synthia_path
data_paths = read_txt(osp.join('/checkpoint/jihou/data/synthia4d/splits', self.DATA_PATH_FILE[phase]))
if phase == DatasetPhase.Train:
data_paths = data_paths[:int(len(data_paths)*config.data.data_ratio)]
data_paths = [d.split()[0] for d in data_paths]
logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))
super().__init__(
data_paths,
data_root=data_root,
input_transform=input_transform,
target_transform=target_transform,
ignore_label=config.data.ignore_label,
return_transformation=config.data.return_transformation,
augment_data=augment_data,
elastic_distortion=elastic_distortion,
config=config)
def load_data(self, index):
filepath = self.data_root / self.data_paths[index]
plydata = PlyData.read(filepath)
data = plydata.elements[0].data
coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
feats = np.array([data['r'], data['g'], data['b']], dtype=np.float32).T
labels = np.array(data['l'], dtype=np.int32)
instances = np.zeros_like(labels)
return coords, feats, labels, instances
class SynthiaCVPR15cmVoxelizationDataset(SynthiaVoxelizationDataset):
pass
class SynthiaCVPR30cmVoxelizationDataset(SynthiaVoxelizationDataset):
VOXEL_SIZE = 30
class SynthiaAllSequencesVoxelizationDataset(SynthiaVoxelizationDataset):
DATA_PATH_FILE = {
DatasetPhase.Train: 'train_raw.txt',
DatasetPhase.Val: 'val_raw.txt',
DatasetPhase.Test: 'test_raw.txt'
}
class TestSynthia(unittest.TestCase):
@debug_on()
def test(self):
from torch.utils.data import DataLoader
from lib.utils import Timer
from config import get_config
config = get_config()
dataset = SynthiaVoxelizationDataset(config)
timer = Timer()
data_loader = DataLoader(
dataset=dataset,
collate_fn=cfl_collate_fn_factory(limit_numpoints=False),
num_workers=0,
batch_size=4,
shuffle=True)
# Start from index 1
# for i, batch in enumerate(data_loader, 1):
iter = data_loader.__iter__()
for i in range(100):
timer.tic()
      batch = next(iter)  # DataLoader iterators no longer provide a .next() method in Python 3
print(batch, timer.toc())
if __name__ == '__main__':
unittest.main()
| ContrastiveSceneContexts-main | downstream/insseg/datasets/synthia.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC
from pathlib import Path
from collections import defaultdict
import random
import numpy as np
from enum import Enum
import torch
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
from plyfile import PlyData
import datasets.transforms as t
from datasets.dataloader import InfSampler, DistributedInfSampler
from datasets.voxelizer import Voxelizer
from lib.distributed import get_world_size
class DatasetPhase(Enum):
Train = 0
Val = 1
Val2 = 2
TrainVal = 3
Test = 4
Debug = 5
def datasetphase_2str(arg):
if arg == DatasetPhase.Train:
return 'train'
elif arg == DatasetPhase.Val:
return 'val'
elif arg == DatasetPhase.Val2:
return 'val2'
elif arg == DatasetPhase.TrainVal:
return 'trainval'
elif arg == DatasetPhase.Test:
return 'test'
else:
raise ValueError('phase must be one of dataset enum.')
def str2datasetphase_type(arg):
if arg.upper() == 'TRAIN':
return DatasetPhase.Train
elif arg.upper() == 'VAL':
return DatasetPhase.Val
elif arg.upper() == 'VAL2':
return DatasetPhase.Val2
elif arg.upper() == 'TRAINVAL':
return DatasetPhase.TrainVal
elif arg.upper() == 'TEST':
return DatasetPhase.Test
else:
raise ValueError('phase must be one of train/val/test')
def cache(func):
def wrapper(self, *args, **kwargs):
# Assume that args[0] is index
index = args[0]
if self.cache:
if index not in self.cache_dict[func.__name__]:
results = func(self, *args, **kwargs)
self.cache_dict[func.__name__][index] = results
return self.cache_dict[func.__name__][index]
else:
return func(self, *args, **kwargs)
return wrapper
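# Editor's note: illustrative sketch, not part of the original code. It documents the contract
# the `cache` decorator above expects from its host object: a boolean `self.cache` flag and a
# `self.cache_dict` defaultdict(dict) in which results are memoized per (method name, index).
class _CacheDecoratorExample:
  def __init__(self, use_cache=True):
    self.cache = use_cache
    self.cache_dict = defaultdict(dict)
    self.num_loads = 0  # counts how often the stand-in "expensive" load actually runs
  @cache
  def load_data(self, index):
    self.num_loads += 1  # with use_cache=True this increments only once per distinct index
    return index * 2  # stand-in for an expensive disk read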
class DictDataset(Dataset, ABC):
IS_FULL_POINTCLOUD_EVAL = False
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
data_root='/'):
"""
data_paths: list of lists, [[str_path_to_input, str_path_to_label], [...]]
"""
Dataset.__init__(self)
# Allows easier path concatenation
if not isinstance(data_root, Path):
data_root = Path(data_root)
self.data_root = data_root
self.data_paths = sorted(data_paths)
self.prevoxel_transform = prevoxel_transform
self.input_transform = input_transform
self.target_transform = target_transform
# dictionary of input
self.data_loader_dict = {
'input': (self.load_input, self.input_transform),
'target': (self.load_target, self.target_transform)
}
# For large dataset, do not cache
self.cache = cache
self.cache_dict = defaultdict(dict)
self.loading_key_order = ['input', 'target']
def load_input(self, index):
raise NotImplementedError
def load_target(self, index):
raise NotImplementedError
def get_classnames(self):
pass
def reorder_result(self, result):
return result
def __getitem__(self, index):
out_array = []
for k in self.loading_key_order:
loader, transformer = self.data_loader_dict[k]
v = loader(index)
if transformer:
v = transformer(v)
out_array.append(v)
return out_array
def __len__(self):
return len(self.data_paths)
class VoxelizationDatasetBase(DictDataset, ABC):
IS_TEMPORAL = False
CLIP_BOUND = (-1000, -1000, -1000, 1000, 1000, 1000)
ROTATION_AXIS = None
NUM_IN_CHANNEL = None
NUM_LABELS = -1 # Number of labels in the dataset, including all ignore classes
IGNORE_LABELS = None # labels that are not evaluated
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
data_root='/',
ignore_mask=255,
return_transformation=False,
**kwargs):
"""
ignore_mask: label value for ignore class. It will not be used as a class in the loss or evaluation.
"""
DictDataset.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
cache=cache,
data_root=data_root)
self.ignore_mask = ignore_mask
self.return_transformation = return_transformation
def __getitem__(self, index):
raise NotImplementedError
def load_ply(self, index):
filepath = self.data_root / self.data_paths[index]
plydata = PlyData.read(filepath)
data = plydata.elements[0].data
coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T
labels = np.array(data['label'], dtype=np.int32)
return coords, feats, labels, None
def load_data(self, index):
raise NotImplementedError
def __len__(self):
num_data = len(self.data_paths)
return num_data
class VoxelizationDataset(VoxelizationDatasetBase):
"""This dataset loads RGB point clouds and their labels as a list of points
and voxelizes the pointcloud with sufficient data augmentation.
"""
# Voxelization arguments
VOXEL_SIZE = 0.05 # 5cm
# Coordinate Augmentation Arguments: Unlike feature augmentation, coordinate
# augmentation has to be done before voxelization
SCALE_AUGMENTATION_BOUND = (0.9, 1.1)
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 6, np.pi / 6), (-np.pi, np.pi), (-np.pi / 6, np.pi / 6))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.05, 0.05), (-0.2, 0.2))
ELASTIC_DISTORT_PARAMS = None
# MISC.
PREVOXELIZATION_VOXEL_SIZE = None
# Augment coords to feats
AUGMENT_COORDS_TO_FEATS = False
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
data_root='/',
ignore_label=255,
return_transformation=False,
augment_data=False,
config=None,
**kwargs):
self.augment_data = augment_data
self.config = config
VoxelizationDatasetBase.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
        cache=kwargs.get('cache', False),  # forward the cache flag explicitly; the bare name 'cache' would resolve to the module-level decorator
data_root=data_root,
ignore_mask=ignore_label,
return_transformation=return_transformation)
# Prevoxel transformations
self.voxelizer = Voxelizer(
voxel_size=self.VOXEL_SIZE,
clip_bound=self.CLIP_BOUND,
use_augmentation=augment_data,
scale_augmentation_bound=self.SCALE_AUGMENTATION_BOUND,
rotation_augmentation_bound=self.ROTATION_AUGMENTATION_BOUND,
translation_augmentation_ratio_bound=self.TRANSLATION_AUGMENTATION_RATIO_BOUND,
ignore_label=ignore_label)
# map labels not evaluated to ignore_label
label_map = {}
n_used = 0
for l in range(self.NUM_LABELS):
if l in self.IGNORE_LABELS:
label_map[l] = self.ignore_mask
else:
label_map[l] = n_used
n_used += 1
label_map[self.ignore_mask] = self.ignore_mask
self.label_map = label_map
self.NUM_LABELS -= len(self.IGNORE_LABELS)
def _augment_coords_to_feats(self, coords, feats, labels=None):
norm_coords = coords - coords.mean(0)
# color must come first.
if isinstance(coords, np.ndarray):
feats = np.concatenate((feats, norm_coords), 1)
else:
feats = torch.cat((feats, norm_coords), 1)
return coords, feats, labels
def convert_mat2cfl(self, mat):
# Generally, xyz,rgb,label
return mat[:, :3], mat[:, 3:-1], mat[:, -1]
def get_instance_info(self, xyz, instance_ids):
'''
:param xyz: (n, 3)
:param instance_ids: (n), int, (1~nInst, -1)
    :return: dict with "ids" (per-point instance ids), "center" ((n, 3) per-point instance centers),
             "occupancy" (instance id -> point count) and "bbox" (instance id -> min/max corners)
'''
    centers = np.ones((xyz.shape[0], 3), dtype=np.float32) * -1  # (n, 3), float: per-point center (cx, cy, cz) of its instance, -1 for unassigned points
occupancy = {} # (nInst), int
bbox = {}
unique_ids = np.unique(instance_ids)
for id_ in unique_ids:
if id_ == -1:
continue
mask = (instance_ids == id_)
xyz_ = xyz[mask]
bbox_min = xyz_.min(0)
bbox_max = xyz_.max(0)
center = xyz_.mean(0)
centers[mask] = center
occupancy[id_] = mask.sum()
bbox[id_] = np.concatenate([bbox_min, bbox_max])
return {"ids": instance_ids, "center": centers, "occupancy": occupancy, "bbox": bbox}
def __getitem__(self, index):
coords, feats, labels, instances = self.load_data(index)
# Downsample the pointcloud with finer voxel size before transformation for memory and speed
if self.PREVOXELIZATION_VOXEL_SIZE is not None:
inds = ME.utils.sparse_quantize(
coords / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True)
coords = coords[inds]
feats = feats[inds]
labels = labels[inds]
instances = instances[inds]
# Prevoxel transformations
if self.prevoxel_transform is not None:
coords, feats, labels = self.prevoxel_transform(coords, feats, labels)
coords, feats, labels, instances, transformation = self.voxelizer.voxelize(
coords, feats, labels, instances)
#import ipdb; ipdb.set_trace()
#from lib.pc_utils import save_point_cloud
#save_point_cloud(coords, 'test.ply')
# map labels not used for evaluation to ignore_label
if self.input_transform is not None:
coords, feats, labels, instances = self.input_transform(coords, feats, labels, instances)
if self.target_transform is not None:
coords, feats, labels, instances = self.target_transform(coords, feats, labels, instances)
if self.augment_data:
# For some networks, making the network invariant to even, odd coords is important
coords += (torch.rand(3) * 100).int().numpy()
#----------------Instances-------------------------
instance_info = instances
condition = (labels == self.ignore_mask)
instances[condition] = -1
IGNORE_LABELS_INSTANCE = self.IGNORE_LABELS if self.config.misc.train_stuff else self.IGNORE_LABELS_INSTANCE
for ignore_id in IGNORE_LABELS_INSTANCE:
condition = (labels == ignore_id)
instances[condition] = -1
instance_info = self.get_instance_info(coords, instances)
# ------------- label mapping --------------------
if self.IGNORE_LABELS is not None:
      labels = np.array([self.label_map[x] for x in labels], dtype=np.int64)  # np.int was removed in NumPy >= 1.24
# Use coordinate features if config is set
if self.AUGMENT_COORDS_TO_FEATS:
coords, feats, labels = self._augment_coords_to_feats(coords, feats, labels)
return_args = [coords, feats, labels, instance_info]
if self.return_transformation:
return_args.append(transformation.astype(np.float32))
return tuple(return_args)
class TemporalVoxelizationDataset(VoxelizationDataset):
IS_TEMPORAL = True
def __init__(self,
data_paths,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
data_root='/',
ignore_label=255,
temporal_dilation=1,
temporal_numseq=3,
return_transformation=False,
augment_data=False,
config=None,
**kwargs):
VoxelizationDataset.__init__(
self,
data_paths,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
data_root=data_root,
ignore_label=ignore_label,
return_transformation=return_transformation,
augment_data=augment_data,
config=config,
**kwargs)
self.temporal_dilation = temporal_dilation
self.temporal_numseq = temporal_numseq
temporal_window = temporal_dilation * (temporal_numseq - 1) + 1
self.numels = [len(p) - temporal_window + 1 for p in self.data_paths]
if any([numel <= 0 for numel in self.numels]):
raise ValueError('Your temporal window configuration is too wide for '
'this dataset. Please change the configuration.')
def load_world_pointcloud(self, filename):
raise NotImplementedError
def __getitem__(self, index):
for seq_idx, numel in enumerate(self.numels):
if index >= numel:
index -= numel
else:
break
numseq = self.temporal_numseq
if self.augment_data and self.config.data.temporal_rand_numseq:
numseq = random.randrange(1, self.temporal_numseq + 1)
dilations = [self.temporal_dilation for i in range(numseq - 1)]
if self.augment_data and self.config.data.temporal_rand_dilation:
dilations = [random.randrange(1, self.temporal_dilation + 1) for i in range(numseq - 1)]
files = [self.data_paths[seq_idx][index + sum(dilations[:i])] for i in range(numseq)]
world_pointclouds = [self.load_world_pointcloud(f) for f in files]
ptcs, centers = zip(*world_pointclouds)
# Downsample pointcloud for speed and memory
if self.PREVOXELIZATION_VOXEL_SIZE is not None:
new_ptcs = []
for ptc in ptcs:
inds = ME.utils.sparse_quantize(
ptc[:, :3] / self.PREVOXELIZATION_VOXEL_SIZE, return_index=True)
new_ptcs.append(ptc[inds])
ptcs = new_ptcs
# Apply prevoxel transformations
ptcs = [self.prevoxel_transform(ptc) for ptc in ptcs]
coords, feats, labels = zip(*ptcs)
outs = self.voxelizer.voxelize_temporal(
coords, feats, labels, centers=centers, return_transformation=self.return_transformation)
if self.return_transformation:
coords_t, feats_t, labels_t, transformation_t = outs
else:
coords_t, feats_t, labels_t = outs
joint_coords = np.vstack([
np.hstack((coords, np.ones((coords.shape[0], 1)) * i)) for i, coords in enumerate(coords_t)
])
joint_feats = np.vstack(feats_t)
joint_labels = np.hstack(labels_t)
# map labels not used for evaluation to ignore_label
if self.input_transform is not None:
joint_coords, joint_feats, joint_labels = self.input_transform(joint_coords, joint_feats,
joint_labels)
if self.target_transform is not None:
joint_coords, joint_feats, joint_labels = self.target_transform(joint_coords, joint_feats,
joint_labels)
if self.IGNORE_LABELS is not None:
      joint_labels = np.array([self.label_map[x] for x in joint_labels], dtype=np.int64)  # np.int was removed in NumPy >= 1.24
return_args = [joint_coords, joint_feats, joint_labels]
if self.return_transformation:
pointclouds = np.vstack([
np.hstack((pointcloud[0][:, :6], np.ones((pointcloud[0].shape[0], 1)) * i))
for i, pointcloud in enumerate(world_pointclouds)
])
transformations = np.vstack(
[np.hstack((transformation, [i])) for i, transformation in enumerate(transformation_t)])
return_args.extend([pointclouds.astype(np.float32), transformations.astype(np.float32)])
return tuple(return_args)
def __len__(self):
num_data = sum(self.numels)
return num_data
def initialize_data_loader(DatasetClass,
config,
phase,
num_workers,
shuffle,
repeat,
augment_data,
batch_size,
limit_numpoints,
input_transform=None,
target_transform=None):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
if config.data.return_transformation:
collate_fn = t.cflt_collate_fn_factory(limit_numpoints)
else:
collate_fn = t.cfl_collate_fn_factory(limit_numpoints)
prevoxel_transform_train = []
if augment_data:
prevoxel_transform_train.append(t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS))
if len(prevoxel_transform_train) > 0:
prevoxel_transforms = t.Compose(prevoxel_transform_train)
else:
prevoxel_transforms = None
input_transforms = []
if input_transform is not None:
input_transforms += input_transform
if augment_data:
input_transforms += [
t.RandomDropout(0.2),
t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL),
t.ChromaticAutoContrast(),
t.ChromaticTranslation(config.augmentation.data_aug_color_trans_ratio),
t.ChromaticJitter(config.augmentation.data_aug_color_jitter_std),
# t.HueSaturationTranslation(config.data_aug_hue_max, config.data_aug_saturation_max),
]
if len(input_transforms) > 0:
input_transforms = t.Compose(input_transforms)
else:
input_transforms = None
dataset = DatasetClass(
config,
prevoxel_transform=prevoxel_transforms,
input_transform=input_transforms,
target_transform=target_transform,
cache=config.data.cache_data,
augment_data=augment_data,
phase=phase)
data_args = {
'dataset': dataset,
'num_workers': num_workers,
'batch_size': batch_size,
'collate_fn': collate_fn,
}
if repeat:
if get_world_size() > 1:
data_args['sampler'] = DistributedInfSampler(dataset, shuffle=shuffle) # torch.utils.data.distributed.DistributedSampler(dataset)
else:
data_args['sampler'] = InfSampler(dataset, shuffle)
else:
data_args['shuffle'] = shuffle
data_loader = DataLoader(**data_args)
return data_loader
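# Editor's note: illustrative sketch, not part of the original code. It shows one plausible
# call site for initialize_data_loader from a training script; the config fields referenced
# below (num_workers, batch_size, train_limit_numpoints) are assumptions for demonstration.
def _example_build_train_loader(DatasetClass, config):
  return initialize_data_loader(
      DatasetClass,
      config,
      phase='train',
      num_workers=config.data.num_workers,
      shuffle=True,
      repeat=True,
      augment_data=True,
      batch_size=config.data.batch_size,
      limit_numpoints=config.data.train_limit_numpoints)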
| ContrastiveSceneContexts-main | downstream/insseg/datasets/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
from collections import defaultdict
from scipy import spatial
import torch
from plyfile import PlyData
from lib.utils import read_txt, fast_hist, per_class_iu
from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type, cache
import datasets.transforms as t
class StanfordVoxelizationDatasetBase:
# added
NUM_LABELS = 14
CLASS_LABELS = ('clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column',
'door', 'floor', 'sofa', 'table', 'wall', 'window')
VALID_CLASS_IDS = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13)
IGNORE_LABELS = tuple(set(range(14)) - set(VALID_CLASS_IDS))
CLASS_LABELS_INSTANCE = ('clutter', 'beam', 'board', 'bookcase', 'chair', 'column', 'door', 'sofa', 'table', 'window')
VALID_CLASS_IDS_INSTANCE = (0, 1, 2, 3, 5, 6, 7, 9, 11, 13)
IGNORE_LABELS_INSTANCE = tuple(set(range(14)) - set(VALID_CLASS_IDS_INSTANCE))
#---------
CLIP_SIZE = None
CLIP_BOUND = None
LOCFEAT_IDX = 2
ROTATION_AXIS = 'z'
#IGNORE_LABELS = (10,) # remove stairs, following SegCloud
# CLASSES = [
# 'clutter', 'beam', 'board', 'bookcase', 'ceiling', 'chair', 'column', 'door', 'floor', 'sofa',
# 'table', 'wall', 'window'
# ]
IS_FULL_POINTCLOUD_EVAL = True
DATA_PATH_FILE = {
DatasetPhase.Train: 'train.txt',
DatasetPhase.Val: 'val.txt',
DatasetPhase.TrainVal: 'trainval.txt',
DatasetPhase.Test: 'test.txt'
}
def test_pointcloud(self, pred_dir):
print('Running full pointcloud evaluation.')
# Join room by their area and room id.
room_dict = defaultdict(list)
for i, data_path in enumerate(self.data_paths):
area, room = data_path.split(os.sep)
room, _ = os.path.splitext(room)
room_id = '_'.join(room.split('_')[:-1])
room_dict[(area, room_id)].append(i)
# Test independently for each room.
sys.setrecursionlimit(100000) # Increase recursion limit for k-d tree.
pred_list = sorted(os.listdir(pred_dir))
hist = np.zeros((self.NUM_LABELS, self.NUM_LABELS))
for room_idx, room_list in enumerate(room_dict.values()):
print(f'Evaluating room {room_idx} / {len(room_dict)}.')
# Join all predictions and query pointclouds of split data.
pred = np.zeros((0, 4))
pointcloud = np.zeros((0, 7))
for i in room_list:
pred = np.vstack((pred, np.load(os.path.join(pred_dir, pred_list[i]))))
pointcloud = np.vstack((pointcloud, self.load_ply(i)[0]))
# Deduplicate all query pointclouds of split data.
pointcloud = np.array(list(set(tuple(l) for l in pointcloud.tolist())))
# Run test for each room.
pred_tree = spatial.KDTree(pred[:, :3], leafsize=500)
_, result = pred_tree.query(pointcloud[:, :3])
ptc_pred = pred[result, 3].astype(int)
ptc_gt = pointcloud[:, -1].astype(int)
if self.IGNORE_LABELS:
ptc_pred = self.label2masked[ptc_pred]
ptc_gt = self.label2masked[ptc_gt]
hist += fast_hist(ptc_pred, ptc_gt, self.NUM_LABELS)
# Print results.
ious = []
print('Per class IoU:')
for i, iou in enumerate(per_class_iu(hist) * 100):
result_str = ''
if hist.sum(1)[i]:
result_str += f'{iou}'
ious.append(iou)
else:
result_str += 'N/A' # Do not print if data not in ground truth.
print(result_str)
print(f'Average IoU: {np.nanmean(ious)}')
def _augment_coords_to_feats(self, coords, feats, labels=None):
# Center x,y
coords_center = coords.mean(0, keepdims=True)
coords_center[0, 2] = 0
norm_coords = coords - coords_center
feats = np.concatenate((feats, norm_coords), 1)
return coords, feats, labels
class StanfordDataset(StanfordVoxelizationDatasetBase, VoxelizationDataset):
# Voxelization arguments
VOXEL_SIZE = 0.05 # 5cm
CLIP_BOUND = 4 # [-N, N]
TEST_CLIP_BOUND = None
# Augmentation arguments
ROTATION_AUGMENTATION_BOUND = \
((-np.pi / 32, np.pi / 32), (-np.pi / 32, np.pi / 32), (-np.pi, np.pi))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (-0.05, 0.05))
# AUGMENT_COORDS_TO_FEATS = True
# NUM_IN_CHANNEL = 6
AUGMENT_COORDS_TO_FEATS = False
NUM_IN_CHANNEL = 3
def __init__(self,
config,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
cache=False,
augment_data=True,
elastic_distortion=False,
phase=DatasetPhase.Train):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:
self.CLIP_BOUND = self.TEST_CLIP_BOUND
data_root = config.data.stanford3d_path
if isinstance(self.DATA_PATH_FILE[phase], (list, tuple)):
data_paths = []
for split in self.DATA_PATH_FILE[phase]:
data_paths += read_txt(os.path.join(data_root, 'splits', split))
else:
data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase]))
if config.data.voxel_size:
self.VOXEL_SIZE = config.data.voxel_size
logging.info('voxel size: {}'.format(self.VOXEL_SIZE))
logging.info('Loading {} {}: {}'.format(self.__class__.__name__, phase,
self.DATA_PATH_FILE[phase]))
VoxelizationDataset.__init__(
self,
data_paths,
data_root=data_root,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
ignore_label=config.data.ignore_label,
return_transformation=config.data.return_transformation,
augment_data=augment_data,
elastic_distortion=elastic_distortion,
config=config)
@cache
def load_ply(self, index):
filepath = self.data_root / self.data_paths[index]
plydata = PlyData.read(filepath)
data = plydata.elements[0].data
coords = np.array([data['x'], data['y'], data['z']], dtype=np.float32).T
feats = np.array([data['red'], data['green'], data['blue']], dtype=np.float32).T
labels = np.array(data['label'], dtype=np.int32)
return coords, feats, labels, None
@cache
def load_data(self, index):
filepath = self.data_root / self.data_paths[index]
pointcloud = torch.load(filepath)
coords = pointcloud[:,:3].astype(np.float32)
feats = pointcloud[:,3:6].astype(np.float32)
labels = pointcloud[:,6].astype(np.int32)
instances = pointcloud[:,7].astype(np.int32)
return coords, feats, labels, instances
class StanfordArea5Dataset(StanfordDataset):
DATA_PATH_FILE = {
DatasetPhase.Train: ['area1.txt', 'area2.txt', 'area3.txt', 'area4.txt', 'area6.txt'],
DatasetPhase.Val: 'area5.txt',
DatasetPhase.Test: 'area5.txt'
}
class StanfordArea53cmDataset(StanfordArea5Dataset):
CLIP_BOUND = 3.2
VOXEL_SIZE = 0.03
class StanfordArea57d5cmDataset(StanfordArea5Dataset):
VOXEL_SIZE = 0.075
class StanfordArea510cmDataset(StanfordArea5Dataset):
VOXEL_SIZE = 0.1
def test(config):
"""Test point cloud data loader.
"""
from torch.utils.data import DataLoader
from lib.utils import Timer
import open3d as o3d
def make_pcd(coords, feats):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(coords[:, :3].float().numpy())
pcd.colors = o3d.utility.Vector3dVector(feats[:, :3].numpy() / 255)
return pcd
timer = Timer()
DatasetClass = StanfordArea5Dataset
transformations = [
t.RandomHorizontalFlip(DatasetClass.ROTATION_AXIS, DatasetClass.IS_TEMPORAL),
t.ChromaticAutoContrast(),
t.ChromaticTranslation(config.data_aug_color_trans_ratio),
t.ChromaticJitter(config.data_aug_color_jitter_std),
]
dataset = DatasetClass(
config,
prevoxel_transform=t.ElasticDistortion(DatasetClass.ELASTIC_DISTORT_PARAMS),
input_transform=t.Compose(transformations),
augment_data=True,
cache=True,
elastic_distortion=True)
data_loader = DataLoader(
dataset=dataset,
collate_fn=t.cfl_collate_fn_factory(limit_numpoints=False),
batch_size=1,
shuffle=True)
# Start from index 1
iter = data_loader.__iter__()
for i in range(100):
timer.tic()
    coords, feats, labels = next(iter)  # DataLoader iterators no longer provide a .next() method in Python 3
pcd = make_pcd(coords, feats)
o3d.visualization.draw_geometries([pcd])
print(timer.toc())
if __name__ == '__main__':
from config import get_config
config = get_config()
test(config)
| ContrastiveSceneContexts-main | downstream/insseg/datasets/stanford.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc
import numpy as np
import MinkowskiEngine as ME
from scipy.linalg import expm, norm
# Rotation matrix along axis with angle theta
def M(axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
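# Editor's note: illustrative sketch, not part of the original code. M() above is the
# exponential map: expm of the skew-symmetric cross-product matrix yields the rotation by
# `theta` around `axis` (equivalent to Rodrigues' rotation formula). For example, a 90-degree
# rotation about z maps the x axis onto the y axis:
def _rotation_matrix_example():
  R = M(np.array([0., 0., 1.]), np.pi / 2)
  return R @ np.array([1., 0., 0.])  # approximately [0., 1., 0.]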
class Voxelizer:
def __init__(self,
voxel_size=1,
clip_bound=None,
use_augmentation=False,
scale_augmentation_bound=None,
rotation_augmentation_bound=None,
translation_augmentation_ratio_bound=None,
ignore_label=255):
"""
Args:
voxel_size: side length of a voxel
clip_bound: boundary of the voxelizer. Points outside the bound will be deleted
expects either None or an array like ((-100, 100), (-100, 100), (-100, 100)).
scale_augmentation_bound: None or (0.9, 1.1)
rotation_augmentation_bound: None or ((np.pi / 6, np.pi / 6), None, None) for 3 axis.
Use random order of x, y, z to prevent bias.
      translation_augmentation_ratio_bound: None or per-axis ratios of the bounding-box size,
        e.g. ((-0.2, 0.2), (0, 0), (-0.05, 0.05)), used to randomly shift the clip center.
ignore_label: label assigned for ignore (not a training label).
"""
self.voxel_size = voxel_size
self.clip_bound = clip_bound
self.ignore_label = ignore_label
# Augmentation
self.use_augmentation = use_augmentation
self.scale_augmentation_bound = scale_augmentation_bound
self.rotation_augmentation_bound = rotation_augmentation_bound
self.translation_augmentation_ratio_bound = translation_augmentation_ratio_bound
def get_transformation_matrix(self):
voxelization_matrix, rotation_matrix = np.eye(4), np.eye(4)
# Get clip boundary from config or pointcloud.
# Get inner clip bound to crop from.
# Transform pointcloud coordinate to voxel coordinate.
# 1. Random rotation
rot_mat = np.eye(3)
if self.use_augmentation and self.rotation_augmentation_bound is not None:
      if isinstance(self.rotation_augmentation_bound, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
rot_mats = []
for axis_ind, rot_bound in enumerate(self.rotation_augmentation_bound):
theta = 0
axis = np.zeros(3)
axis[axis_ind] = 1
if rot_bound is not None:
theta = np.random.uniform(*rot_bound)
rot_mats.append(M(axis, theta))
# Use random order
np.random.shuffle(rot_mats)
rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2]
else:
raise ValueError()
rotation_matrix[:3, :3] = rot_mat
# 2. Scale and translate to the voxel space.
scale = 1 / self.voxel_size
if self.use_augmentation and self.scale_augmentation_bound is not None:
scale *= np.random.uniform(*self.scale_augmentation_bound)
np.fill_diagonal(voxelization_matrix[:3, :3], scale)
# Get final transformation matrix.
return voxelization_matrix, rotation_matrix
def clip(self, coords, center=None, trans_aug_ratio=None):
bound_min = np.min(coords, 0).astype(float)
bound_max = np.max(coords, 0).astype(float)
bound_size = bound_max - bound_min
if center is None:
center = bound_min + bound_size * 0.5
if trans_aug_ratio is not None:
trans = np.multiply(trans_aug_ratio, bound_size)
center += trans
lim = self.clip_bound
if isinstance(self.clip_bound, (int, float)):
if bound_size.max() < self.clip_bound:
return None
else:
clip_inds = ((coords[:, 0] >= (-lim + center[0])) & \
(coords[:, 0] < (lim + center[0])) & \
(coords[:, 1] >= (-lim + center[1])) & \
(coords[:, 1] < (lim + center[1])) & \
(coords[:, 2] >= (-lim + center[2])) & \
(coords[:, 2] < (lim + center[2])))
return clip_inds
# Clip points outside the limit
clip_inds = ((coords[:, 0] >= (lim[0][0] + center[0])) & \
(coords[:, 0] < (lim[0][1] + center[0])) & \
(coords[:, 1] >= (lim[1][0] + center[1])) & \
(coords[:, 1] < (lim[1][1] + center[1])) & \
(coords[:, 2] >= (lim[2][0] + center[2])) & \
(coords[:, 2] < (lim[2][1] + center[2])))
return clip_inds
def voxelize(self, coords, feats, labels, instances, center=None):
assert coords.shape[1] == 3 and coords.shape[0] == feats.shape[0] and coords.shape[0]
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
if instances is not None:
instances = instances[clip_inds]
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T[:, :3])
# Align all coordinates to the origin.
min_coords = coords_aug.min(0)
M_t = np.eye(4)
M_t[:3, -1] = -min_coords
rigid_transformation = M_t @ rigid_transformation
coords_aug = np.floor(coords_aug - min_coords)
# key = self.hash(coords_aug) # floor happens by astype(np.uint64)
mapping, colabels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, return_index=True, ignore_label=self.ignore_label)
coords_aug = coords_aug[mapping]
feats = feats[mapping]
labels = colabels
instances = instances[mapping]
return coords_aug, feats, labels, instances, rigid_transformation.flatten()
def voxelize_temporal(self,
coords_t,
feats_t,
labels_t,
centers=None,
return_transformation=False):
# Legacy code, remove
if centers is None:
centers = [
None,
] * len(coords_t)
coords_tc, feats_tc, labels_tc, transformation_tc = [], [], [], []
# ######################### Data Augmentation #############################
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
# ######################### Voxelization #############################
# Voxelize coords
for coords, feats, labels, center in zip(coords_t, feats_t, labels_t, centers):
###################################
# Clip the data if bound exists
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
###################################
homo_coords = np.hstack((coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T)[:, :3]
coords_aug, feats, labels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, ignore_label=self.ignore_label)
coords_tc.append(coords_aug)
feats_tc.append(feats)
labels_tc.append(labels)
transformation_tc.append(rigid_transformation.flatten())
return_args = [coords_tc, feats_tc, labels_tc]
if return_transformation:
return_args.append(transformation_tc)
return tuple(return_args)
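# Editor's note (descriptive comment, not original code): voxelize() above composes the
# translate-to-origin, optional random rotation and scale-to-voxel-grid transforms into one
# homogeneous matrix M_t @ M_r @ M_v and returns it flattened to 16 values; callers such as
# ScannetVoxelizationDataset.get_original_pointcloud invert this matrix to map voxel
# coordinates back (up to the floor operation) to the original point positions.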
def test():
N = 16575
coords = np.random.rand(N, 3) * 10
feats = np.random.rand(N, 4)
labels = np.floor(np.random.rand(N) * 3)
coords[:3] = 0
labels[:3] = 2
voxelizer = Voxelizer()
print(voxelizer.voxelize(coords, feats, labels))
if __name__ == '__main__':
test()
| ContrastiveSceneContexts-main | downstream/insseg/datasets/voxelizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class InfSampler(Sampler):
"""Samples elements randomly, without replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source, shuffle=False):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
perm = len(self.data_source)
if self.shuffle:
perm = torch.randperm(perm)
self._perm = perm.tolist()
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
next = __next__ # Python 2 compatibility
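# Editor's note: illustrative usage sketch, not part of the original code. Because InfSampler
# restarts its permutation instead of raising StopIteration, a DataLoader built with it yields
# batches indefinitely, so training loops bound iteration by step count rather than by epochs:
#
#   sampler = InfSampler(dataset, shuffle=True)   # `dataset` and `max_steps` are assumed
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, sampler=sampler)
#   batches = iter(loader)
#   for step in range(max_steps):
#     batch = next(batches)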
class DistributedInfSampler(InfSampler):
def __init__(self, data_source, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.data_source = data_source
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.it = 0
self.num_samples = int(math.ceil(len(self.data_source) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.reset_permutation()
def __next__(self):
it = self.it * self.num_replicas + self.rank
value = self._perm[it % len(self._perm)]
self.it = self.it + 1
if (self.it * self.num_replicas) >= len(self._perm):
self.reset_permutation()
self.it = 0
return value
def __len__(self):
    return self.num_samples
| ContrastiveSceneContexts-main | downstream/insseg/datasets/dataloader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from pathlib import Path
import torch
import numpy as np
from scipy import spatial
from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type
from lib.pc_utils import read_plyfile, save_point_cloud
from lib.utils import read_txt, fast_hist, per_class_iu
from lib.io3d import write_triangle_mesh, create_color_palette
class ScannetVoxelizationDataset(VoxelizationDataset):
# added
NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS.
NUM_IN_CHANNEL = 3
CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')
VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)
IGNORE_LABELS = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS))
CLASS_LABELS_INSTANCE = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter',
'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS_INSTANCE = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
IGNORE_LABELS_INSTANCE = tuple(set(range(NUM_LABELS)) - set(VALID_CLASS_IDS_INSTANCE))
# Voxelization arguments
CLIP_BOUND = None
TEST_CLIP_BOUND = None
VOXEL_SIZE = 0.05
# Augmentation arguments
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi,
np.pi))
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (0, 0))
ELASTIC_DISTORT_PARAMS = ((0.2, 0.4), (0.8, 1.6))
ROTATION_AXIS = 'z'
LOCFEAT_IDX = 2
IS_FULL_POINTCLOUD_EVAL = True
# If trainval.txt does not exist, copy train.txt and add contents from val.txt
DATA_PATH_FILE = {
DatasetPhase.Train: 'scannetv2_train.txt',
DatasetPhase.Val: 'scannetv2_val.txt',
DatasetPhase.TrainVal: 'scannetv2_trainval.txt',
DatasetPhase.Test: 'scannetv2_test.txt',
DatasetPhase.Debug: 'debug.txt'
}
def __init__(self,
config,
prevoxel_transform=None,
input_transform=None,
target_transform=None,
augment_data=True,
elastic_distortion=False,
cache=False,
phase=DatasetPhase.Train):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
# Use cropped rooms for train/val
data_root = config.data.scannet_path
if phase not in [DatasetPhase.Train, DatasetPhase.TrainVal]:
self.CLIP_BOUND = self.TEST_CLIP_BOUND
data_paths = read_txt(os.path.join(data_root, 'splits', self.DATA_PATH_FILE[phase]))
if phase == DatasetPhase.Train and config.data.train_file:
data_paths = read_txt(os.path.join(data_root, 'splits', config.data.train_file))
# data efficiency by sampling points
self.sampled_inds = {}
if config.data.sampled_inds and phase == DatasetPhase.Train:
self.sampled_inds = torch.load(config.data.sampled_inds)
data_paths = [data_path + '.pth' for data_path in data_paths]
logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))
super().__init__(
data_paths,
data_root=data_root,
prevoxel_transform=prevoxel_transform,
input_transform=input_transform,
target_transform=target_transform,
ignore_label=config.data.ignore_label,
return_transformation=config.data.return_transformation,
augment_data=augment_data,
elastic_distortion=elastic_distortion,
config=config)
def get_output_id(self, iteration):
return '_'.join(Path(self.data_paths[iteration]).stem.split('_')[:2])
def _augment_locfeat(self, pointcloud):
# Assuming that pointcloud is xyzrgb(...), append location feat.
pointcloud = np.hstack(
(pointcloud[:, :6], 100 * np.expand_dims(pointcloud[:, self.LOCFEAT_IDX], 1),
pointcloud[:, 6:]))
return pointcloud
def load_data(self, index):
filepath = self.data_root / self.data_paths[index]
pointcloud = torch.load(filepath)
coords = pointcloud[0].astype(np.float32)
feats = pointcloud[1].astype(np.float32)
labels = pointcloud[2].astype(np.int32)
instances = pointcloud[3].astype(np.int32)
if self.sampled_inds:
scene_name = self.get_output_id(index)
      mask = np.ones_like(labels).astype(bool)  # np.bool was removed in NumPy >= 1.24
sampled_inds = self.sampled_inds[scene_name]
mask[sampled_inds] = False
labels[mask] = 0
instances[mask] = 0
return coords, feats, labels, instances
def get_original_pointcloud(self, coords, transformation, iteration):
logging.info('===> Start testing on original pointcloud space.')
data_path = self.data_paths[iteration]
fullply_f = self.data_root / data_path
query_xyz, _, query_label, _ = torch.load(fullply_f)
coords = coords[:, 1:].numpy() + 0.5
curr_transformation = transformation[0, :16].numpy().reshape(4, 4)
coords = np.hstack((coords, np.ones((coords.shape[0], 1))))
coords = (np.linalg.inv(curr_transformation) @ coords.T).T
# Run test for each room.
from pykeops.numpy import LazyTensor
from pykeops.numpy.utils import IsGpuAvailable
query_xyz = np.array(query_xyz)
x_i = LazyTensor( query_xyz[:,None,:] ) # x_i.shape = (1e6, 1, 3)
y_j = LazyTensor( coords[:,:3][None,:,:] ) # y_j.shape = ( 1, 2e6,3)
D_ij = ((x_i - y_j) ** 2).sum(-1) # (M**2, N) symbolic matrix of squared distances
indKNN = D_ij.argKmin(1, dim=1) # Grid <-> Samples, (M**2, K) integer tensor
inds = indKNN[:,0]
return inds, query_xyz
def save_prediction(self, coords, pred, transformation, iteration, save_dir):
print('Running full pointcloud evaluation.')
#if dataset.IGNORE_LABELS:
# decode_label_map = {}
# for k, v in dataset.label_map.items():
# decode_label_map[v] = k
# orig_pred = np.array([decode_label_map[x.item()] for x in orig_pred.cpu()], dtype=np.int)
inds_mapping, xyz = self.get_original_pointcloud(coords, transformation, iteration)
save = {'points': coords, 'mapping': inds_mapping, 'labels': pred}
    # Save prediction in txt format for submission.
room_id = self.get_output_id(iteration)
torch.save(save, os.path.join(save_dir, room_id))
#np.savetxt(f'{save_dir}/{room_id}.txt', ptc_pred, fmt='%i')
def save_groundtruth(self, coords, gt, transformation, iteration, save_dir):
save = {'points': coords, 'labels': gt}
    # Save prediction in txt format for submission.
room_id = self.get_output_id(iteration)
torch.save(save, os.path.join(save_dir, room_id))
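# Editor's note (descriptive comment, not original code): get_original_pointcloud above
# inverts the stored voxelization transform and then assigns each point of the original scan
# the index of its nearest transformed voxel through a brute-force 1-NN search in KeOps; a
# scipy cKDTree query would be an equivalent (assumed) CPU-only alternative.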
class ScannetVoxelization2cmDataset(ScannetVoxelizationDataset):
VOXEL_SIZE = 0.02
| ContrastiveSceneContexts-main | downstream/insseg/datasets/scannet.py |
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_label.py --scan_path [path to scan data] --output_file [output file]
# python imports
import math
import logging
import os, sys, argparse
import inspect
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from itertools import izip
except ImportError:
izip = zip
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from datasets.evaluation.scannet_benchmark_utils import util_3d
from datasets.evaluation.scannet_benchmark_utils import util
class Evaluator:
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS):
#CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
# 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
# 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1
self.gt = {}
self.pred = {}
max_id = self.UNKNOWN_ID
self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong)
def update_confusion(self, pred_ids, gt_ids, sceneId=None):
# sanity checks
if not pred_ids.shape == gt_ids.shape:
util.print_error('%s: number of predicted values does not match number of vertices' % pred_file, user_fault=True)
n = self.confusion.shape[0]
k = (gt_ids >= 0) & (gt_ids < n)
temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n)
for valid_class_row in self.VALID_CLASS_IDS:
for valid_class_col in self.VALID_CLASS_IDS:
self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col]
@staticmethod
def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None):
os.makedirs(base, exist_ok=True)
util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids)
def get_iou(self, label_id, confusion):
if not label_id in self.VALID_CLASS_IDS:
return float('nan')
# #true positives
tp = np.longlong(confusion[label_id, label_id])
# #false negatives
fn = np.longlong(confusion[label_id, :].sum()) - tp
# #false positives
not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id]
fp = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
return (float(tp) / denom, tp, denom)
def write_result_file(self, confusion, ious, filename):
with open(filename, 'w') as f:
f.write('iou scores\n')
for i in range(len(self.VALID_CLASS_IDS)):
label_id = self.VALID_CLASS_IDS[i]
label_name = self.CLASS_LABELS[i]
iou = ious[label_name][0]
f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou))
f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean()))
f.write('\nconfusion matrix\n')
f.write('\t\t\t')
for i in range(len(self.VALID_CLASS_IDS)):
#f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))
f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i]))
f.write('\n')
for r in range(len(self.VALID_CLASS_IDS)):
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r]))
for c in range(len(self.VALID_CLASS_IDS)):
f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]]))
f.write('\n')
print('wrote results to', filename)
def evaluate_confusion(self, output_file=None):
class_ious = {}
counter = 0
summation = 0
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
label_id = self.VALID_CLASS_IDS[i]
class_ious[label_name] = self.get_iou(label_id, self.confusion)
# print
logging.info('classes IoU')
logging.info('----------------------------')
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
try:
logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2]))
summation += class_ious[label_name][0]
counter += 1
except:
logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name))
logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter))
if output_file:
self.write_result_file(self.confusion, class_ious, output_file)
return summation / counter
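# Editor's note: illustrative sanity check, not part of the original code. get_iou above
# computes, for class c, IoU = TP / (TP + FP + FN) with TP = confusion[c, c],
# FN = row_sum(c) - TP and FP = the column sum of c over the other valid classes. The tiny
# two-class example below (hypothetical labels 'a'/'b' with ids 1/2) returns 8 / 12 = 2 / 3.
def _iou_sanity_check_example():
    ev = Evaluator(CLASS_LABELS=['a', 'b'], VALID_CLASS_IDS=np.array([1, 2]))
    ev.confusion[1, 1], ev.confusion[1, 2], ev.confusion[2, 1] = 8, 2, 2
    iou, tp, denom = ev.get_iou(1, ev.confusion)
    return iou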
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to gt files')
parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt')
opt = parser.parse_args()
return opt
def main():
opt = config()
#------------------------- ScanNet --------------------------
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if pred_file == 'semantic_label_evaluation.txt':
continue
gt_file = os.path.join(opt.gt_path, pred_file)
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True)
gt_ids = util_3d.load_ids(gt_file)
pred_file = os.path.join(opt.pred_path, pred_file)
pred_ids = util_3d.load_ids(pred_file)
evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0])
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
# evaluate
evaluator.evaluate_confusion(opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/evaluate_semantic_label.py |
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file look like:
# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence]
# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence]
# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence]
# ...
#
# NOTE: The prediction files must live in the root of the given prediction path.
# Predicted mask .txt files must live in a subfolder.
# Additionally, filenames must not contain spaces.
# The relative paths to predicted masks must contain one integer per line,
# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order).
# Non-zero integers indicate part of the predicted instance.
# The label ids specify the class of the corresponding mask.
# Confidence is a float confidence score of the mask.
#
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file]
# python imports
import logging
import math
import os, sys, argparse
import inspect
from copy import deepcopy
import argparse
import numpy as np
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from datasets.evaluation.scannet_benchmark_utils import util_3d
from datasets.evaluation.scannet_benchmark_utils import util
def setup_logging():
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
class Evaluator:
# ---------- Evaluation params ---------- #
# overlaps for evaluation
overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25)
# minimum region size for evaluation [verts]
min_region_sizes = np.array( [ 10 ] )
# distance thresholds [m]
distance_threshes = np.array( [ float('inf') ] )
# distance confidences
distance_confs = np.array( [ -float('inf') ] )
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False):
# ---------- Label info ---------- #
#CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
# 'window', 'bookshelf', 'picture', 'counter',
# 'desk', 'curtain', 'refrigerator', 'shower curtain',
# 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.ID_TO_LABEL = {}
self.LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
self.pred_instances = {}
self.gt_instances = {}
self.benchmark = benchmark
def evaluate_matches(self, matches):
# results: class x overlap
ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float )
for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)):
for oi, overlap_th in enumerate(self.overlaps):
pred_visited = {}
for m in matches:
for p in matches[m]['pred']:
for label_name in self.CLASS_LABELS:
for p in matches[m]['pred'][label_name]:
if 'filename' in p:
pred_visited[p['filename']] = False
for li, label_name in enumerate(self.CLASS_LABELS):
y_true = np.empty(0)
y_score = np.empty(0)
hard_false_negatives = 0
has_gt = False
has_pred = False
for m in matches:
pred_instances = matches[m]['pred'][label_name]
gt_instances = matches[m]['gt'][label_name]
# filter groups in ground truth
gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ]
if gt_instances:
has_gt = True
if pred_instances:
has_pred = True
cur_true = np.ones ( len(gt_instances) )
cur_score = np.ones ( len(gt_instances) ) * (-float("inf"))
cur_match = np.zeros( len(gt_instances) , dtype=np.bool )
# collect matches
for (gti,gt) in enumerate(gt_instances):
found_match = False
num_pred = len(gt['matched_pred'])
for pred in gt['matched_pred']:
# greedy assignments
if pred_visited[pred['filename']]:
continue
overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection'])
if overlap > overlap_th:
confidence = pred['confidence']
# if already have a prediction for this gt,
# the prediction with the lower score is automatically a false positive
if cur_match[gti]:
max_score = max( cur_score[gti] , confidence )
min_score = min( cur_score[gti] , confidence )
cur_score[gti] = max_score
# append false positive
cur_true = np.append(cur_true,0)
cur_score = np.append(cur_score,min_score)
cur_match = np.append(cur_match,True)
# otherwise set score
else:
found_match = True
cur_match[gti] = True
cur_score[gti] = confidence
pred_visited[pred['filename']] = True
if not found_match:
hard_false_negatives += 1
# remove non-matched ground truth instances
cur_true = cur_true [ cur_match==True ]
cur_score = cur_score[ cur_match==True ]
# collect non-matched predictions as false positive
for pred in pred_instances:
found_gt = False
for gt in pred['matched_gt']:
overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection'])
if overlap > overlap_th:
found_gt = True
break
if not found_gt:
num_ignore = pred['void_intersection']
for gt in pred['matched_gt']:
# group?
if gt['instance_id'] < 1000:
num_ignore += gt['intersection']
# small ground truth instances
if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf:
num_ignore += gt['intersection']
proportion_ignore = float(num_ignore)/pred['vert_count']
# if not ignored append false positive
if proportion_ignore <= overlap_th:
cur_true = np.append(cur_true,0)
confidence = pred["confidence"]
cur_score = np.append(cur_score,confidence)
# append to overall results
y_true = np.append(y_true,cur_true)
y_score = np.append(y_score,cur_score)
# compute average precision
if has_gt and has_pred:
# compute precision recall curve first
# sorting and cumsum
score_arg_sort = np.argsort(y_score)
y_score_sorted = y_score[score_arg_sort]
y_true_sorted = y_true[score_arg_sort]
y_true_sorted_cumsum = np.cumsum(y_true_sorted)
# unique thresholds
(thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True )
num_prec_recall = len(unique_indices) + 1
# prepare precision recall
num_examples = len(y_score_sorted)
try:
num_true_examples = y_true_sorted_cumsum[-1]
except:
num_true_examples = 0
precision = np.zeros(num_prec_recall)
recall = np.zeros(num_prec_recall)
# deal with the first point
y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 )
# deal with remaining
for idx_res,idx_scores in enumerate(unique_indices):
cumsum = y_true_sorted_cumsum[idx_scores-1]
tp = num_true_examples - cumsum
fp = num_examples - idx_scores - tp
fn = cumsum + hard_false_negatives
p = float(tp)/(tp+fp)
r = float(tp)/(tp+fn)
precision[idx_res] = p
recall [idx_res] = r
# first point in curve is artificial
precision[-1] = 1.
recall [-1] = 0.
# compute average of precision-recall curve
recall_for_conv = np.copy(recall)
recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
recall_for_conv = np.append(recall_for_conv, 0.)
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid')
# integrate is now simply a dot product
ap_current = np.dot(precision, stepWidths)
elif has_gt:
ap_current = 0.0
else:
ap_current = float('nan')
ap[di,li,oi] = ap_current
return ap
def compute_averages(self, aps):
d_inf = 0
o50 = np.where(np.isclose(self.overlaps,0.5))
o25 = np.where(np.isclose(self.overlaps,0.25))
oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25)))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(self.CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
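    # Editor's note (descriptive comment, not original code): compute_averages above follows
    # the ScanNet benchmark convention and reports, per class and overall, AP averaged over
    # the IoU thresholds 0.50:0.95 (every overlap except 0.25), plus AP at IoU 0.50 and 0.25.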
def assign_instances_for_scan(self, scene_id):
# get gt instances
gt_ids = self.gt_instances[scene_id]
gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL)
# associate
gt2pred = deepcopy(gt_instances)
for label in gt2pred:
for gt in gt2pred[label]:
gt['matched_pred'] = []
pred2gt = {}
for label in self.CLASS_LABELS:
pred2gt[label] = []
num_pred_instances = 0
# mask of void labels in the groundtruth
bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS))
# go thru all prediction masks
for instance_id in self.pred_instances[scene_id]:
label_id = int(self.pred_instances[scene_id][instance_id]['label_id'])
conf = self.pred_instances[scene_id][instance_id]['conf']
if not label_id in self.ID_TO_LABEL:
continue
label_name = self.ID_TO_LABEL[label_id]
# read the mask
pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask']
# convert to binary
num = np.count_nonzero(pred_mask)
if num < self.min_region_sizes[0]:
continue # skip if empty
pred_instance = {}
pred_instance['filename'] = str(scene_id) + '/' + str(instance_id)
pred_instance['pred_id'] = num_pred_instances
pred_instance['label_id'] = label_id
pred_instance['vert_count'] = num
pred_instance['confidence'] = conf
pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask))
# matched gt instances
matched_gt = []
# go thru all gt instances with matching label
for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
if intersection > 0:
gt_copy = gt_inst.copy()
pred_copy = pred_instance.copy()
gt_copy['intersection'] = intersection
pred_copy['intersection'] = intersection
matched_gt.append(gt_copy)
gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
pred_instance['matched_gt'] = matched_gt
num_pred_instances += 1
pred2gt[label_name].append(pred_instance)
return gt2pred, pred2gt
def print_results(self, avgs):
sep = ""
col1 = ":"
lineLen = 64
logging.info("")
logging.info("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP" ) + sep
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
logging.info(line)
logging.info("#"*lineLen)
for (li,label_name) in enumerate(self.CLASS_LABELS):
ap_avg = avgs["classes"][label_name]["ap"]
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_avg ) + sep
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
logging.info(line)
all_ap_avg = avgs["all_ap"]
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
logging.info("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_avg) + sep
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
logging.info(line)
logging.info("")
@staticmethod
def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}):
os.makedirs(output_path, exist_ok=True)
os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True)
f = open(os.path.join(output_path, scene_id + '.txt'), 'w')
for instance_id in pred_inst:
            # predicted instance ids start from 0; in the gt, valid instance ids start from 1
score = pred_inst[instance_id]['conf']
label = pred_inst[instance_id]['label_id']
mask = pred_inst[instance_id]['pred_mask']
f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score))
if instance_id < len(pred_inst) - 1:
f.write('\n')
util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask)
f.close()
def add_prediction(self, instance_info, id):
self.pred_instances[id] = instance_info
def add_gt(self, instance_info, id):
self.gt_instances[id] = instance_info
# see scannet repo for generating gt data for val in benchmark format
def add_gt_in_benchmark_format(self, scene_id):
gt_file_path = '/rhome/jhou/data/dataset/scannet/scannet_benchmark/gt_instance/'
gt_file = os.path.join(gt_file_path, scene_id + '.txt')
gt_ids = util_3d.load_ids(gt_file)
self.add_gt(gt_ids, scene_id)
def evaluate(self):
print('evaluating', len(self.pred_instances), 'scans...')
matches = {}
for i, scene_id in enumerate(self.pred_instances):
gt2pred, pred2gt = self.assign_instances_for_scan(scene_id)
matches[scene_id] = {}
matches[scene_id]['gt'] = gt2pred
matches[scene_id]['pred'] = pred2gt
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
print('')
ap_scores = self.evaluate_matches(matches)
avgs = self.compute_averages(ap_scores)
# print
self.print_results(avgs)
return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%']
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap = avgs["classes"][class_name]["ap"]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n')
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]')
opt = parser.parse_args()
return opt
if __name__ == '__main__':
opt = config()
setup_logging()
#-----------------scannet----------------------
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
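    # NYU40 ids for the 18 ScanNet instance classes; wall (1) and floor (2) are
    # intentionally absent because the instance benchmark does not score them.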
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if os.path.isdir(os.path.join(opt.pred_path, pred_file)):
continue
scene_id = pred_file[:12]
sys.stdout.write("\rscans read: {}".format(i+1))
sys.stdout.flush()
gt_file = os.path.join(opt.gt_path, pred_file)
gt_ids = util_3d.load_ids(gt_file)
evaluator.add_gt(gt_ids, scene_id)
instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path)
for pred_mask_file in instances:
# read the mask
pred_mask = util_3d.load_ids(pred_mask_file)
instances[pred_mask_file]['pred_mask'] = pred_mask
evaluator.add_prediction(instances, scene_id)
print('')
_, _, _ = evaluator.evaluate()
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/evaluate_semantic_instance.py |
import os, sys
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
if represents_int([key for key in mapping.keys()][0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/scannet_benchmark_utils/util.py |
import os, sys
import json
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
from . import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
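# e.g. for a rigid 4x4 pose [[R, t], [0, 0, 0, 1]] each point p maps to R @ p + t;
# the final divide by the homogeneous coordinate only matters for projective matrices.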
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'predicted_masks'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
# write mask indexing
output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt')
export_ids(output_mask_file, mask)
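    # The resulting <scene>.txt lists one instance per line as
    #   <relative mask path> <label id> <confidence>
    # and each mask file holds one 0/1 value per vertex -- the format parsed by
    # read_instance_prediction_file below and expected by the ScanNet benchmark.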
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
dict = {}
dict["instance_id"] = self.instance_id
dict["label_id"] = self.label_id
dict["vert_count"] = self.vert_count
dict["med_dist"] = self.med_dist
dict["dist_conf"] = self.dist_conf
return dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]')
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path')
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename))
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/scannet_benchmark_utils/util_3d.py |
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_label.py --pred_path [path to prediction .txt files] --gt_path [path to gt .txt files] --output_file [output file]
# python imports
import math
import logging
import os, sys, argparse
import inspect
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from itertools import izip
except ImportError:
izip = zip
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
import util_3d
import util
class Evaluator:
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS):
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1
self.gt = {}
self.pred = {}
max_id = self.UNKNOWN_ID
self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong)
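        # confusion is indexed as confusion[gt_id, pred_id]; it is sized to cover every
        # raw id up to UNKNOWN_ID, although update_confusion only ever fills the
        # rows/columns belonging to the valid class ids.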
def update_confusion(self, pred_ids, gt_ids, sceneId=None):
# sanity checks
if not pred_ids.shape == gt_ids.shape:
            util.print_error('%s: number of predicted values does not match number of vertices' % sceneId, user_fault=True)
n = self.confusion.shape[0]
k = (gt_ids >= 0) & (gt_ids < n)
temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n)
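        # bincount trick: each (gt, pred) pair is flattened to gt * n + pred, counted,
        # and reshaped into an n x n matrix with ground truth along the rows and
        # predictions along the columns (e.g. gt=2, pred=5 increments temporal[2, 5]).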
for valid_class_row in self.VALID_CLASS_IDS:
for valid_class_col in self.VALID_CLASS_IDS:
self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col]
@staticmethod
def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None):
os.makedirs(base, exist_ok=True)
util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids)
def get_iou(self, label_id, confusion):
if not label_id in self.VALID_CLASS_IDS:
return float('nan')
# #true positives
tp = np.longlong(confusion[label_id, label_id])
# #false negatives
fn = np.longlong(confusion[label_id, :].sum()) - tp
# #false positives
not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id]
fp = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
return (float(tp) / denom, tp, denom)
def write_result_file(self, confusion, ious, filename):
with open(filename, 'w') as f:
f.write('iou scores\n')
for i in range(len(self.VALID_CLASS_IDS)):
label_id = self.VALID_CLASS_IDS[i]
label_name = self.CLASS_LABELS[i]
iou = ious[label_name][0]
f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou))
f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean()))
f.write('\nconfusion matrix\n')
f.write('\t\t\t')
for i in range(len(self.VALID_CLASS_IDS)):
#f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))
f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i]))
f.write('\n')
for r in range(len(self.VALID_CLASS_IDS)):
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r]))
for c in range(len(self.VALID_CLASS_IDS)):
f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]]))
f.write('\n')
print('wrote results to', filename)
def evaluate_confusion(self, output_file=None):
class_ious = {}
counter = 0
summation = 0
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
label_id = self.VALID_CLASS_IDS[i]
class_ious[label_name] = self.get_iou(label_id, self.confusion)
# print
logging.info('classes IoU')
logging.info('----------------------------')
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
try:
logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2]))
summation += class_ious[label_name][0]
counter += 1
except:
logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name))
logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter))
if output_file:
self.write_result_file(self.confusion, class_ious, output_file)
return summation / counter
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to gt files')
parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt')
opt = parser.parse_args()
return opt
def main():
opt = config()
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
#------------------------- ScanNet --------------------------
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if pred_file == 'semantic_label_evaluation.txt':
continue
gt_file = os.path.join(opt.gt_path, pred_file)
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True)
gt_ids = util_3d.load_ids(gt_file)
pred_file = os.path.join(opt.pred_path, pred_file)
pred_ids = util_3d.load_ids(pred_file)
evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0])
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
# evaluate
evaluator.evaluate_confusion(opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_label.py |
import os, sys
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
if represents_int([key for key in mapping.keys()][0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/util.py |
import os, sys
import json
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'predicted_masks'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
# write mask indexing
output_mask_file_relavtive = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
f.write('%s %d %f\n' % (output_mask_file_relavtive, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt')
export_ids(output_mask_file, mask)
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
dict = {}
dict["instance_id"] = self.instance_id
dict["label_id"] = self.label_id
dict["vert_count"] = self.vert_count
dict["med_dist"] = self.med_dist
dict["dist_conf"] = self.dist_conf
return dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]')
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path')
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename))
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/util_3d.py |
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file looks like:
# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence]
# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence]
# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence]
# ...
#
# NOTE: The prediction files must live in the root of the given prediction path.
# Predicted mask .txt files must live in a subfolder.
# Additionally, filenames must not contain spaces.
# Each predicted mask .txt file must contain one integer per line,
# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order).
# Non-zero integers indicate part of the predicted instance.
# The label ids specify the class of the corresponding mask.
# Confidence is a float confidence score of the mask.
#
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_instance.py --pred_path [path to prediction .txt files] --gt_path [path to gt .txt files] --output_file [output file]
# python imports
import logging
import math
import os, sys, argparse
import inspect
from copy import deepcopy
import argparse
import numpy as np
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
import util_3d
import util
def setup_logging():
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
class Evaluator:
# ---------- Evaluation params ---------- #
# overlaps for evaluation
overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25)
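    # IoU thresholds: 0.50 to 0.90 in 0.05 steps (np.arange excludes the 0.95 stop)
    # plus the extra 0.25 threshold that only feeds the AP_25% numbers.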
# minimum region size for evaluation [verts]
min_region_sizes = np.array( [ 100 ] )
# distance thresholds [m]
distance_threshes = np.array( [ float('inf') ] )
# distance confidences
distance_confs = np.array( [ -float('inf') ] )
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False):
# ---------- Label info ---------- #
#CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
# 'window', 'bookshelf', 'picture', 'counter',
# 'desk', 'curtain', 'refrigerator', 'shower curtain',
# 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.ID_TO_LABEL = {}
self.LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
self.pred_instances = {}
self.gt_instances = {}
self.benchmark = benchmark
def evaluate_matches(self, matches):
# results: class x overlap
ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , np.float )
for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)):
for oi, overlap_th in enumerate(self.overlaps):
pred_visited = {}
for m in matches:
for p in matches[m]['pred']:
for label_name in self.CLASS_LABELS:
for p in matches[m]['pred'][label_name]:
if 'filename' in p:
pred_visited[p['filename']] = False
for li, label_name in enumerate(self.CLASS_LABELS):
y_true = np.empty(0)
y_score = np.empty(0)
hard_false_negatives = 0
has_gt = False
has_pred = False
for m in matches:
pred_instances = matches[m]['pred'][label_name]
gt_instances = matches[m]['gt'][label_name]
# filter groups in ground truth
gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ]
if gt_instances:
has_gt = True
if pred_instances:
has_pred = True
cur_true = np.ones ( len(gt_instances) )
cur_score = np.ones ( len(gt_instances) ) * (-float("inf"))
cur_match = np.zeros( len(gt_instances) , dtype=np.bool )
# collect matches
for (gti,gt) in enumerate(gt_instances):
found_match = False
num_pred = len(gt['matched_pred'])
for pred in gt['matched_pred']:
# greedy assignments
if pred_visited[pred['filename']]:
continue
overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection'])
if overlap > overlap_th:
confidence = pred['confidence']
# if already have a prediction for this gt,
# the prediction with the lower score is automatically a false positive
if cur_match[gti]:
max_score = max( cur_score[gti] , confidence )
min_score = min( cur_score[gti] , confidence )
cur_score[gti] = max_score
# append false positive
cur_true = np.append(cur_true,0)
cur_score = np.append(cur_score,min_score)
cur_match = np.append(cur_match,True)
# otherwise set score
else:
found_match = True
cur_match[gti] = True
cur_score[gti] = confidence
pred_visited[pred['filename']] = True
if not found_match:
hard_false_negatives += 1
# remove non-matched ground truth instances
cur_true = cur_true [ cur_match==True ]
cur_score = cur_score[ cur_match==True ]
# collect non-matched predictions as false positive
for pred in pred_instances:
found_gt = False
for gt in pred['matched_gt']:
overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection'])
if overlap > overlap_th:
found_gt = True
break
if not found_gt:
num_ignore = pred['void_intersection']
for gt in pred['matched_gt']:
# group?
if gt['instance_id'] < 1000:
num_ignore += gt['intersection']
# small ground truth instances
if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf:
num_ignore += gt['intersection']
proportion_ignore = float(num_ignore)/pred['vert_count']
# if not ignored append false positive
if proportion_ignore <= overlap_th:
cur_true = np.append(cur_true,0)
confidence = pred["confidence"]
cur_score = np.append(cur_score,confidence)
# append to overall results
y_true = np.append(y_true,cur_true)
y_score = np.append(y_score,cur_score)
# compute average precision
if has_gt and has_pred:
# compute precision recall curve first
# sorting and cumsum
score_arg_sort = np.argsort(y_score)
y_score_sorted = y_score[score_arg_sort]
y_true_sorted = y_true[score_arg_sort]
y_true_sorted_cumsum = np.cumsum(y_true_sorted)
# unique thresholds
(thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True )
num_prec_recall = len(unique_indices) + 1
# prepare precision recall
num_examples = len(y_score_sorted)
try:
num_true_examples = y_true_sorted_cumsum[-1]
except:
num_true_examples = 0
precision = np.zeros(num_prec_recall)
recall = np.zeros(num_prec_recall)
# deal with the first point
y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 )
# deal with remaining
for idx_res,idx_scores in enumerate(unique_indices):
cumsum = y_true_sorted_cumsum[idx_scores-1]
tp = num_true_examples - cumsum
fp = num_examples - idx_scores - tp
fn = cumsum + hard_false_negatives
p = float(tp)/(tp+fp)
r = float(tp)/(tp+fn)
precision[idx_res] = p
recall [idx_res] = r
# first point in curve is artificial
precision[-1] = 1.
recall [-1] = 0.
# compute average of precision-recall curve
recall_for_conv = np.copy(recall)
recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
recall_for_conv = np.append(recall_for_conv, 0.)
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid')
# integrate is now simply a dot product
ap_current = np.dot(precision, stepWidths)
elif has_gt:
ap_current = 0.0
else:
ap_current = float('nan')
ap[di,li,oi] = ap_current
return ap
def compute_averages(self, aps):
d_inf = 0
o50 = np.where(np.isclose(self.overlaps,0.5))
o25 = np.where(np.isclose(self.overlaps,0.25))
oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25)))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(self.CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
def assign_instances_for_scan(self, scene_id):
# get gt instances
gt_ids = self.gt_instances[scene_id]
gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL)
# associate
gt2pred = deepcopy(gt_instances)
for label in gt2pred:
for gt in gt2pred[label]:
gt['matched_pred'] = []
pred2gt = {}
for label in self.CLASS_LABELS:
pred2gt[label] = []
num_pred_instances = 0
# mask of void labels in the groundtruth
bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS))
# go thru all prediction masks
for instance_id in self.pred_instances[scene_id]:
label_id = int(self.pred_instances[scene_id][instance_id]['label_id'])
conf = self.pred_instances[scene_id][instance_id]['conf']
if not label_id in self.ID_TO_LABEL:
continue
label_name = self.ID_TO_LABEL[label_id]
# read the mask
pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask']
# convert to binary
num = np.count_nonzero(pred_mask)
if num < self.min_region_sizes[0]:
continue # skip if empty
pred_instance = {}
pred_instance['filename'] = str(scene_id) + '/' + str(instance_id)
pred_instance['pred_id'] = num_pred_instances
pred_instance['label_id'] = label_id
pred_instance['vert_count'] = num
pred_instance['confidence'] = conf
pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask))
# matched gt instances
matched_gt = []
# go thru all gt instances with matching label
for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
if intersection > 0:
gt_copy = gt_inst.copy()
pred_copy = pred_instance.copy()
gt_copy['intersection'] = intersection
pred_copy['intersection'] = intersection
matched_gt.append(gt_copy)
gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
pred_instance['matched_gt'] = matched_gt
num_pred_instances += 1
pred2gt[label_name].append(pred_instance)
return gt2pred, pred2gt
def print_results(self, avgs):
sep = ""
col1 = ":"
lineLen = 64
logging.info("")
logging.info("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP" ) + sep
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
logging.info(line)
logging.info("#"*lineLen)
for (li,label_name) in enumerate(self.CLASS_LABELS):
ap_avg = avgs["classes"][label_name]["ap"]
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_avg ) + sep
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
logging.info(line)
all_ap_avg = avgs["all_ap"]
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
logging.info("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_avg) + sep
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
logging.info(line)
logging.info("")
@staticmethod
def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}):
os.makedirs(output_path, exist_ok=True)
os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True)
f = open(os.path.join(output_path, scene_id + '.txt'), 'w')
for instance_id in pred_inst:
            # predicted instance ids start from 0; in the gt, valid instance ids start from 1
score = pred_inst[instance_id]['conf']
label = pred_inst[instance_id]['label_id']
mask = pred_inst[instance_id]['pred_mask']
f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score))
if instance_id < len(pred_inst) - 1:
f.write('\n')
util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask)
f.close()
def add_prediction(self, instance_info, id):
self.pred_instances[id] = instance_info
def add_gt(self, instance_info, id):
self.gt_instances[id] = instance_info
def evaluate(self):
print('evaluating', len(self.pred_instances), 'scans...')
matches = {}
for i, scene_id in enumerate(self.pred_instances):
gt2pred, pred2gt = self.assign_instances_for_scan(scene_id)
matches[scene_id] = {}
matches[scene_id]['gt'] = gt2pred
matches[scene_id]['pred'] = pred2gt
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
print('')
ap_scores = self.evaluate_matches(matches)
avgs = self.compute_averages(ap_scores)
# print
self.print_results(avgs)
return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%']
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap = avgs["classes"][class_name]["ap"]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n')
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]')
opt = parser.parse_args()
return opt
if __name__ == '__main__':
opt = config()
setup_logging()
#-----------------scannet----------------------
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if os.path.isdir(os.path.join(opt.pred_path, pred_file)):
continue
scene_id = pred_file[:12]
sys.stdout.write("\rscans read: {}".format(i+1))
sys.stdout.flush()
gt_file = os.path.join(opt.gt_path, pred_file)
gt_ids = util_3d.load_ids(gt_file)
evaluator.add_gt(gt_ids, scene_id)
instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path)
for pred_mask_file in instances:
# read the mask
pred_mask = util_3d.load_ids(pred_mask_file)
instances[pred_mask_file]['pred_mask'] = pred_mask
evaluator.add_prediction(instances, scene_id)
print('')
_, _, _ = evaluator.evaluate()
| ContrastiveSceneContexts-main | downstream/insseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_instance.py |
import random
from torch.nn import Module
from MinkowskiEngine import SparseTensor
class Wrapper(Module):
"""
Wrapper for the segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, NetClass, in_nchannel, out_nchannel, config):
super(Wrapper, self).__init__()
self.initialize_filter(NetClass, in_nchannel, out_nchannel, config)
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
raise NotImplementedError('Must initialize a model and a filter')
def forward(self, x, coords, colors=None):
soutput = self.model(x)
# During training, make the network invariant to the filter
if not self.training or random.random() < 0.5:
# Filter requires the model to finish the forward pass
wrapper_coords = self.filter.initialize_coords(self.model, coords, colors)
finput = SparseTensor(soutput.F, wrapper_coords)
soutput = self.filter(finput)
return soutput
| ContrastiveSceneContexts-main | downstream/insseg/models/wrapper.py |
from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, BasicBlockINBN, Bottleneck
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
    # Output of the first conv is concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
stride=1,
dilation=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, stride=1, dilation=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1)
return self.final(out)
class ResUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1)
class ResUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2)
class ResUNet18INBN(ResUNet18):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class ResUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3, 2, 2)
class ResUNet14D(ResUNet14):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet18D(ResUNet18):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34D(ResUNet34):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34E(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 64)
class ResUNet34F(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 32)
class MinkUNetHyper(MinkUNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
    # Output of the first conv is concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr4 = ME.MinkowskiPoolingTranspose(kernel_size=8, stride=8, dimension=D)
out_pool4 = self.inplanes
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr5 = ME.MinkowskiPoolingTranspose(kernel_size=4, stride=4, dimension=D)
out_pool5 = self.inplanes
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr6 = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D)
out_pool6 = self.inplanes
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
out_pool5 + out_pool6 + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out_5 = self.pool_tr5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out_6 = self.pool_tr6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1, out_6, out_5)
return self.final(out)
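    # pool_tr5 / pool_tr6 are MinkowskiPoolingTranspose layers that unpool the coarser
    # decoder features back to the input stride, so the final head classifies a
    # hypercolumn-style concatenation of multi-resolution features plus the stride-1 skip.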
class MinkUNetHyper14INBN(MinkUNetHyper):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class STMinkUNetBase(MinkUNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STMinkUNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResUNet14(STMinkUNetBase, ResUNet14):
pass
class STResUNet18(STMinkUNetBase, ResUNet18):
pass
class STResUNet34(STMinkUNetBase, ResUNet34):
pass
class STResUNet50(STMinkUNetBase, ResUNet50):
pass
class STResUNet101(STMinkUNetBase, ResUNet101):
pass
class STResTesseractUNetBase(STMinkUNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14):
pass
class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18):
pass
class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34):
pass
class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50):
pass
class STResTesseractUNet101(STResTesseractUNetBase, ResUNet101):
pass
| ContrastiveSceneContexts-main | downstream/insseg/models/resunet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import models.resunet as resunet
import models.res16unet as res16unet
MODELS = []
def add_models(module):
MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a])
add_models(resunet)
add_models(res16unet)
def get_models():
'''Returns a tuple of sample models.'''
return MODELS
def load_model(name):
'''Creates and returns an instance of the model given its class name.
'''
# Find the model class from its name
all_models = get_models()
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
print('Invalid model index. Options are:')
# Display a list of valid model names
for model in all_models:
print('\t* {}'.format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
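# Example usage (a sketch): 'ResUNet14' is one of the classes registered above via
# add_models(resunet); `config` is assumed to carry the hyper-parameters the model
# reads (e.g. conv1_kernel_size, bn_momentum).
#   NetClass = load_model('ResUNet14')
#   model = NetClass(in_channels=3, out_channels=20, config=config)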
| ContrastiveSceneContexts-main | downstream/insseg/models/__init__.py |
from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU, SparseTensor
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.optimizer.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concatenated to conv6
self.inplanes = self.INIT_DIM
print("building model, ", in_channels)
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.net.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
self.relu = MinkowskiReLU(inplace=True)
self.offsets_pre = conv(self.inplanes, self.inplanes, kernel_size=1, stride=1, bias=True, D=D)
self.bntr_offset = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.offsets = conv(self.inplanes, 3, kernel_size=1, stride=1, bias=True, D=D)
def forward(self, x, detach=False):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# pixel_dist=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# pixel_dist=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
# pixel_dist=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
# pixel_dist=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
# pixel_dist=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
offsets = self.offsets_pre(out)
offsets = self.bntr_offset(offsets)
offsets = self.relu(offsets)
offsets = self.offsets(offsets)
return offsets, self.final(out), out
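# The forward pass returns three tensors: per-point instance-center offsets (3 channels
# from the self.offsets head), per-point semantic logits (self.final), and the features
# of the last decoder block. The trainer supervises the first two; the clustering code
# in lib/test.py consumes the offsets and the logits.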
class Res16UNet14(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet50(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet101(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class Res16UNet14A(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet14A2(Res16UNet14A):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet14B2(Res16UNet14B):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B3(Res16UNet14B):
LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)
class Res16UNet14C(Res16UNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class Res16UNet14D(Res16UNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet18B(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet18D(Res16UNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet34A(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class Res16UNet34B(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class STRes16UNetBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STRes16UNet14(STRes16UNetBase, Res16UNet14):
pass
class STRes16UNet14A(STRes16UNetBase, Res16UNet14A):
pass
class STRes16UNet18(STRes16UNetBase, Res16UNet18):
pass
class STRes16UNet34(STRes16UNetBase, Res16UNet34):
pass
class STRes16UNet50(STRes16UNetBase, Res16UNet50):
pass
class STRes16UNet101(STRes16UNetBase, Res16UNet101):
pass
class STRes16UNet18A(STRes16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class STResTesseract16UNetBase(STRes16UNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
pass
| ContrastiveSceneContexts-main | downstream/insseg/models/res16unet.py |
from MinkowskiEngine import MinkowskiNetwork
class Model(MinkowskiNetwork):
"""
Base network for all sparse convnets.
By default, all networks are segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class HighDimensionalModel(Model):
"""
Base network for all spatio (temporal) chromatic sparse convnet
"""
def __init__(self, in_channels, out_channels, config, D, **kwargs):
assert D > 4, "Num dimension must be greater than 4"
super(HighDimensionalModel, self).__init__(in_channels, out_channels, config, D, **kwargs)
| ContrastiveSceneContexts-main | downstream/insseg/models/model.py |
import torch.nn as nn
from models.common import get_norm
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MEF
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = 'BN'
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=3, stride=stride, dimension=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D)
self.conv2 = ME.MinkowskiConvolution(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
has_bias=False,
dimension=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, bn_momentum=bn_momentum, D=D)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = MEF.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = MEF.relu(out)
return out
class BasicBlockBN(BasicBlockBase):
NORM_TYPE = 'BN'
class BasicBlockIN(BasicBlockBase):
NORM_TYPE = 'IN'
def get_block(norm_type,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
D=3):
if norm_type == 'BN':
return BasicBlockBN(inplanes, planes, stride, dilation, downsample, bn_momentum, D)
elif norm_type == 'IN':
return BasicBlockIN(inplanes, planes, stride, dilation, downsample, bn_momentum, D)
else:
raise ValueError(f'Type {norm_type}, not defined')
| ContrastiveSceneContexts-main | downstream/insseg/models/residual_block.py |
import MinkowskiEngine as ME
def get_norm(norm_type, num_feats, bn_momentum=0.05, D=-1):
if norm_type == 'BN':
return ME.MinkowskiBatchNorm(num_feats, momentum=bn_momentum)
elif norm_type == 'IN':
return ME.MinkowskiInstanceNorm(num_feats, dimension=D)
else:
raise ValueError(f'Type {norm_type}, not defined')
| ContrastiveSceneContexts-main | downstream/insseg/models/common.py |
import torch.nn as nn
import MinkowskiEngine as ME
from models.model import Model
from models.modules.common import ConvType, NormType, get_norm, conv, sum_pool
from models.modules.resnet_block import BasicBlock, Bottleneck
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D)
self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1))
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1))
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1))
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1))
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D),
get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
class ResNet14(ResNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1)
class ResNet18(ResNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2)
class ResNet34(ResNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3)
class ResNet50(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3)
class ResNet101(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3)
class STResNetBase(ResNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResNet14(STResNetBase, ResNet14):
pass
class STResNet18(STResNetBase, ResNet18):
pass
class STResNet34(STResNetBase, ResNet34):
pass
class STResNet50(STResNetBase, ResNet50):
pass
class STResNet101(STResNetBase, ResNet101):
pass
class STResTesseractNetBase(STResNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractNet14(STResTesseractNetBase, STResNet14):
pass
class STResTesseractNet18(STResTesseractNetBase, STResNet18):
pass
class STResTesseractNet34(STResTesseractNetBase, STResNet34):
pass
class STResTesseractNet50(STResTesseractNetBase, STResNet50):
pass
class STResTesseractNet101(STResTesseractNetBase, STResNet101):
pass
| ContrastiveSceneContexts-main | downstream/insseg/models/resnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from models.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BasicBlockIN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BasicBlockINBN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckIN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BottleneckINBN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
| ContrastiveSceneContexts-main | downstream/insseg/models/modules/resnet_block.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import MinkowskiEngine as ME
from models.modules.common import ConvType, NormType
from models.modules.resnet_block import BasicBlock, Bottleneck
class SELayer(nn.Module):
def __init__(self, channel, reduction=16, D=-1):
# Global coords does not require coords_key
super(SELayer, self).__init__()
self.fc = nn.Sequential(
ME.MinkowskiLinear(channel, channel // reduction), ME.MinkowskiReLU(inplace=True),
ME.MinkowskiLinear(channel // reduction, channel), ME.MinkowskiSigmoid())
self.pooling = ME.MinkowskiGlobalPooling(dimension=D)
self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D)
def forward(self, x):
y = self.pooling(x)
y = self.fc(y)
return self.broadcast_mul(x, y)
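# SELayer is squeeze-and-excitation for sparse tensors: global pooling squeezes each
# channel to one value, the two MinkowskiLinear layers plus sigmoid produce per-channel
# gates, and broadcast_mul rescales the input features with those gates.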
class SEBasicBlock(BasicBlock):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
reduction=16,
D=-1):
super(SEBasicBlock, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBasicBlockSN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBasicBlockIN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBasicBlockLN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
class SEBottleneck(Bottleneck):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
D=3,
reduction=16):
super(SEBottleneck, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes * self.expansion, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneckSN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBottleneckIN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBottleneckLN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
| ContrastiveSceneContexts-main | downstream/insseg/models/modules/senet_block.py |
| ContrastiveSceneContexts-main | downstream/insseg/models/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
from enum import Enum
import torch.nn as nn
import MinkowskiEngine as ME
class NormType(Enum):
BATCH_NORM = 0
INSTANCE_NORM = 1
INSTANCE_BATCH_NORM = 2
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
if norm_type == NormType.BATCH_NORM:
return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
elif norm_type == NormType.INSTANCE_NORM:
return ME.MinkowskiInstanceNorm(n_channels)
elif norm_type == NormType.INSTANCE_BATCH_NORM:
return nn.Sequential(
ME.MinkowskiInstanceNorm(n_channels),
ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum))
else:
raise ValueError(f'Norm type: {norm_type} not supported')
class ConvType(Enum):
"""
Define the kernel region type
"""
HYPERCUBE = 0, 'HYPERCUBE'
SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE'
SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE'
HYPERCROSS = 3, 'HYPERCROSS'
SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS'
SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS'
SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS'
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
# Convert the ConvType var to a RegionType var
conv_to_region_type = {
# kernel_size = [k, k, k, 1]
ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID
}
int_to_region_type = {m.value: m for m in ME.RegionType}
def convert_region_type(region_type):
"""
Convert the integer region_type to the corresponding RegionType enum object.
"""
return int_to_region_type[region_type]
def convert_conv_type(conv_type, kernel_size, D):
assert isinstance(conv_type, ConvType), "conv_type must be of ConvType"
region_type = conv_to_region_type[conv_type]
axis_types = None
if conv_type == ConvType.SPATIAL_HYPERCUBE:
# No temporal convolution
if isinstance(kernel_size, collections.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.HYPERCUBE:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIAL_HYPERCROSS:
if isinstance(kernel_size, collections.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.HYPERCROSS:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS:
# Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim
axis_types = [
ME.RegionType.HYPERCUBE,
] * 3
if D == 4:
axis_types.append(ME.RegionType.HYPERCROSS)
return region_type, axis_types, kernel_size
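# Example of the conversion above: with conv_type=SPATIAL_HYPERCUBE, kernel_size=3 and
# D=4 the kernel becomes [3, 3, 3, 1] (no temporal extent) with a HYPERCUBE region;
# with SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS and D=4, axis_types combines three
# HYPERCUBE spatial axes with a final HYPERCROSS axis for time.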
def conv(in_planes,
out_planes,
kernel_size,
stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiConvolution(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def conv_tr(in_planes,
out_planes,
kernel_size,
upsample_stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
upsample_stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D)
return ME.MinkowskiConvolutionTranspose(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=upsample_stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def avg_pool(kernel_size,
stride=1,
dilation=1,
conv_type=ConvType.HYPERCUBE,
in_coords_key=None,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgUnpooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiSumPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
| ContrastiveSceneContexts-main | downstream/insseg/models/modules/common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree. | ContrastiveSceneContexts-main | downstream/insseg/lib/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import LambdaLR, StepLR
class LambdaStepLR(LambdaLR):
def __init__(self, optimizer, lr_lambda, last_step=-1):
super(LambdaStepLR, self).__init__(optimizer, lr_lambda, last_step)
@property
def last_step(self):
"""Use last_epoch for the step counter"""
return self.last_epoch
@last_step.setter
def last_step(self, v):
self.last_epoch = v
class PolyLR(LambdaStepLR):
"""DeepLab learning rate policy"""
def __init__(self, optimizer, max_iter, power=0.9, last_step=-1):
super(PolyLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**power, last_step)
class SquaredLR(LambdaStepLR):
""" Used for SGD Lars"""
def __init__(self, optimizer, max_iter, last_step=-1):
super(SquaredLR, self).__init__(optimizer, lambda s: (1 - s / (max_iter + 1))**2, last_step)
class ExpLR(LambdaStepLR):
def __init__(self, optimizer, step_size, gamma=0.9, last_step=-1):
# (0.9 ** 21.854) = 0.1, (0.95 ** 44.8906) = 0.1
# To get 0.1 every N using gamma 0.9, N * log(0.9)/log(0.1) = 0.04575749 N
# To get 0.1 every N using gamma g, g ** N = 0.1 -> N * log(g) = log(0.1) -> g = np.exp(log(0.1) / N)
super(ExpLR, self).__init__(optimizer, lambda s: gamma**(s / step_size), last_step)
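# Worked example for the relations above: with gamma=0.9 the factor gamma**(s/step_size)
# reaches 0.1 when s/step_size ~= 21.854, so to decay the LR by 10x every 1000 iterations
# one would pass step_size ~= 0.0458 * 1000 ~= 45.8 (equivalently, keep step_size=1 and
# set gamma = 0.1 ** (1/1000)).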
def initialize_optimizer(params, config):
assert config.optimizer in ['SGD', 'Adagrad', 'Adam', 'RMSProp', 'Rprop', 'SGDLars']
if config.optimizer == 'SGD':
return SGD(
params,
lr=config.lr,
momentum=config.sgd_momentum,
dampening=config.sgd_dampening,
weight_decay=config.weight_decay)
elif config.optimizer == 'Adam':
return Adam(
params,
lr=config.lr,
betas=(config.adam_beta1, config.adam_beta2),
weight_decay=config.weight_decay)
else:
logging.error('Optimizer type not supported')
raise ValueError('Optimizer type not supported')
def initialize_scheduler(optimizer, config, last_step=-1):
if config.scheduler == 'StepLR':
return StepLR(
optimizer, step_size=config.step_size, gamma=config.step_gamma, last_epoch=last_step)
elif config.scheduler == 'PolyLR':
return PolyLR(optimizer, max_iter=config.max_iter, power=config.poly_power, last_step=last_step)
elif config.scheduler == 'SquaredLR':
return SquaredLR(optimizer, max_iter=config.max_iter, last_step=last_step)
elif config.scheduler == 'ExpLR':
return ExpLR(
optimizer, step_size=config.exp_step_size, gamma=config.exp_gamma, last_step=last_step)
else:
logging.error('Scheduler not supported')
raise ValueError('Scheduler not supported')
| ContrastiveSceneContexts-main | downstream/insseg/lib/solvers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import shutil
import tempfile
import warnings
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import label_binarize
from lib.utils import Timer, AverageMeter, precision_at_one, fast_hist, per_class_iu, \
get_prediction, get_torch_device, visualize_results, \
permute_pointcloud, save_rotation_pred
from MinkowskiEngine import SparseTensor
from lib.bfs.bfs import Clustering
from datasets.evaluation.evaluate_semantic_instance import Evaluator as InstanceEvaluator
from datasets.evaluation.evaluate_semantic_label import Evaluator as SemanticEvaluator
def print_info(iteration,
max_iteration,
data_time,
iter_time,
losses=None,
scores=None,
ious=None,
hist=None,
ap_class=None,
class_names=None):
debug_str = "{}/{}: ".format(iteration + 1, max_iteration)
debug_str += "Data time: {:.4f}, Iter time: {:.4f}".format(data_time, iter_time)
acc = hist.diagonal() / hist.sum(1) * 100
debug_str += "\tLoss {loss.val:.3f} (AVG: {loss.avg:.3f})\t" \
"Score {top1.val:.3f} (AVG: {top1.avg:.3f})\t" \
"mIOU {mIOU:.3f} mAP {mAP:.3f} mAcc {mAcc:.3f}\n".format(
loss=losses, top1=scores, mIOU=np.nanmean(ious),
mAP=np.nanmean(ap_class), mAcc=np.nanmean(acc))
if class_names is not None:
debug_str += "\nClasses: " + " ".join(class_names) + '\n'
debug_str += 'IOU: ' + ' '.join('{:.03f}'.format(i) for i in ious) + '\n'
debug_str += 'mAP: ' + ' '.join('{:.03f}'.format(i) for i in ap_class) + '\n'
debug_str += 'mAcc: ' + ' '.join('{:.03f}'.format(i) for i in acc) + '\n'
logging.info(debug_str)
def average_precision(prob_np, target_np):
num_class = prob_np.shape[1]
label = label_binarize(target_np, classes=list(range(num_class)))
with np.errstate(divide='ignore', invalid='ignore'):
return average_precision_score(label, prob_np, average=None)
def nms(instances, instances_):
instances_return = {}
counter = 0
for key in instances:
label = instances[key]['label_id'].item()
if label in [10, 12, 16]:
continue
instances_return[counter] = instances[key]
counter += 1
# dual-set clustering: for classes 10, 12 and 16 the pass w/o the voting (offset) loss works better
for key_ in instances_:
label_ = instances_[key_]['label_id'].item()
if label_ in [10, 12, 16]:
instances_return[counter] = instances_[key_]
counter += 1
return instances_return
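# nms merges the two clustering passes: most classes keep the offset-based (voting)
# clusters, while classes 10, 12 and 16 are taken from the second pass that clusters
# the original coordinates without the voting offsets.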
def test(model, data_loader, config):
device = get_torch_device(config.misc.is_cuda)
dataset = data_loader.dataset
num_labels = dataset.NUM_LABELS
global_timer, data_timer, iter_timer = Timer(), Timer(), Timer()
criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label)
losses, scores, ious = AverageMeter(), AverageMeter(), 0
aps = np.zeros((0, num_labels))
hist = np.zeros((num_labels, num_labels))
logging.info('===> Start testing')
global_timer.tic()
data_iter = data_loader.__iter__()
max_iter = len(data_loader)
max_iter_unique = max_iter
######################################################################################
# Added for Instance Segmentation
######################################################################################
VALID_CLASS_IDS = torch.FloatTensor(dataset.VALID_CLASS_IDS).long()
CLASS_LABELS_INSTANCE = dataset.CLASS_LABELS if config.misc.train_stuff else dataset.CLASS_LABELS_INSTANCE
VALID_CLASS_IDS_INSTANCE = dataset.VALID_CLASS_IDS if config.misc.train_stuff else dataset.VALID_CLASS_IDS_INSTANCE
IGNORE_LABELS_INSTANCE = dataset.IGNORE_LABELS if config.misc.train_stuff else dataset.IGNORE_LABELS_INSTANCE
evaluator = InstanceEvaluator(CLASS_LABELS_INSTANCE, VALID_CLASS_IDS_INSTANCE)
cluster_thresh = 1.5
propose_points = 100
score_func = torch.mean
if config.test.evaluate_benchmark:
cluster_thresh = 0.02
propose_points = 250
score_func = torch.median
cluster = Clustering(ignored_labels=IGNORE_LABELS_INSTANCE,
class_mapping=VALID_CLASS_IDS,
thresh=cluster_thresh,
score_func=score_func,
propose_points=propose_points,
closed_points=300,
min_points=50)
if config.test.dual_set_cluster :
# dual set clustering when submit to benchmark
cluster_ = Clustering(ignored_labels=IGNORE_LABELS_INSTANCE,
class_mapping=VALID_CLASS_IDS,
thresh=0.05,
score_func=torch.mean,
propose_points=250,
closed_points=300,
min_points=50)
######################################################################################
# Fix batch normalization running mean and std
model.eval()
# Clear cache (when run in val mode, cleanup training cache)
torch.cuda.empty_cache()
with torch.no_grad():
for iteration in range(max_iter):
data_timer.tic()
if config.data.return_transformation:
coords, input, target, instances, transformation = data_iter.next()
else:
coords, input, target, instances = data_iter.next()
transformation = None
data_time = data_timer.toc(False)
# Preprocess input
iter_timer.tic()
if config.net.wrapper_type != None:
color = input[:, :3].int()
if config.augmentation.normalize_color:
input[:, :3] = input[:, :3] / 255. - 0.5
sinput = SparseTensor(input, coords).to(device)
# Feed forward
inputs = (sinput,) if config.net.wrapper_type == None else (sinput, coords, color)
pt_offsets, soutput, out_feats = model(*inputs)
output = soutput.F
pred = get_prediction(dataset, output, target).int()
iter_time = iter_timer.toc(False)
#####################################################################################
# Added for Instance Segmentation
######################################################################################
if config.test.evaluate_benchmark:
# ---------------- point level -------------------
# voting loss for dual set clustering, w/o using ScoreNet
scene_id = dataset.get_output_id(iteration)
inverse_mapping = dataset.get_original_pointcloud(coords, transformation, iteration)
vertices = inverse_mapping[1] + pt_offsets.feats[inverse_mapping[0]].cpu().numpy()
features = output[inverse_mapping[0]]
instances = cluster.get_instances(vertices, features)
if config.test.dual_set_cluster:
instances_ = cluster_.get_instances(inverse_mapping[1], features)
instances = nms(instances, instances_)
evaluator.add_prediction(instances, scene_id)
# comment out when evaluate on benchmark format
# evaluator.add_gt_in_benchmark_format(scene_id)
evaluator.write_to_benchmark(scene_id=scene_id, pred_inst=instances)
else:
# --------------- voxel level------------------
vertices = coords.cpu().numpy()[:,1:] + pt_offsets.F.cpu().numpy() / dataset.VOXEL_SIZE
clusterred_result = cluster.get_instances(vertices, output.clone().cpu())
instance_ids = instances[0]['ids']
gt_labels = target.clone()
gt_labels[instance_ids == -1] = IGNORE_LABELS_INSTANCE[0] #invalid instance id is -1, map 0,1,255 labels to 0
gt_labels = VALID_CLASS_IDS[gt_labels.long()]
evaluator.add_gt((gt_labels*1000 + instance_ids).numpy(), iteration) # map invalid to invalid label, which is ignored anyway
evaluator.add_prediction(clusterred_result, iteration)
######################################################################################
target_np = target.numpy()
num_sample = target_np.shape[0]
target = target.to(device)
cross_ent = criterion(output, target.long())
losses.update(float(cross_ent), num_sample)
scores.update(precision_at_one(pred, target), num_sample)
hist += fast_hist(pred.cpu().numpy().flatten(), target_np.flatten(), num_labels)
ious = per_class_iu(hist) * 100
prob = torch.nn.functional.softmax(output, dim=1)
ap = average_precision(prob.cpu().detach().numpy(), target_np)
aps = np.vstack((aps, ap))
# Due to heavy class imbalance, some classes may have no test labels at all
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
ap_class = np.nanmean(aps, 0) * 100.
if iteration % config.test.test_stat_freq == 0 and iteration > 0:
reordered_ious = dataset.reorder_result(ious)
reordered_ap_class = dataset.reorder_result(ap_class)
class_names = dataset.get_classnames()
print_info(
iteration,
max_iter_unique,
data_time,
iter_time,
losses,
scores,
reordered_ious,
hist,
reordered_ap_class,
class_names=class_names)
if iteration % config.train.empty_cache_freq == 0:
# Clear cache
torch.cuda.empty_cache()
global_time = global_timer.toc(False)
reordered_ious = dataset.reorder_result(ious)
reordered_ap_class = dataset.reorder_result(ap_class)
class_names = dataset.get_classnames()
print_info(
iteration,
max_iter_unique,
data_time,
iter_time,
losses,
scores,
reordered_ious,
hist,
reordered_ap_class,
class_names=class_names)
logging.info("Finished test. Elapsed time: {:.4f}".format(global_time))
mAP50 = 0.0
#if not config.test.evaluate_benchmark:
_, mAP50, _ = evaluator.evaluate()
return losses.avg, scores.avg, np.nanmean(ap_class), np.nanmean(per_class_iu(hist)) * 100, mAP50
| ContrastiveSceneContexts-main | downstream/insseg/lib/test.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import logging
import os
import sys
import torch
import logging
import torch.nn.functional as F
from torch import nn
from torch.serialization import default_restore_location
from tensorboardX import SummaryWriter
from MinkowskiEngine import SparseTensor
from omegaconf import OmegaConf
from lib.distributed import get_world_size, all_gather, is_master_proc
from models import load_model
from lib.test import test as test_
from lib.solvers import initialize_optimizer, initialize_scheduler
from datasets import load_dataset
from datasets.dataset import initialize_data_loader
from lib.utils import checkpoint, precision_at_one, Timer, AverageMeter, get_prediction, load_state_with_same_shape, count_parameters
class SegmentationTrainer:
def __init__(self, config):
self.is_master = is_master_proc(config.misc.num_gpus) if config.misc.num_gpus > 1 else True
self.cur_device = torch.cuda.current_device()
# load the configurations
self.setup_logging()
if os.path.exists('config.yaml'):
logging.info('===> Loading existing config file')
config = OmegaConf.load('config.yaml')
logging.info('===> Loaded existing config file')
logging.info('===> Configurations')
logging.info(config.pretty())
# dataloader
DatasetClass = load_dataset(config.data.dataset)
logging.info('===> Initializing dataloader')
self.train_data_loader = initialize_data_loader(
DatasetClass, config, phase=config.train.train_phase,
num_workers=config.data.num_workers, augment_data=True,
shuffle=True, repeat=True, batch_size=config.data.batch_size // config.misc.num_gpus,
limit_numpoints=config.data.train_limit_numpoints)
self.val_data_loader = initialize_data_loader(
DatasetClass, config, phase=config.train.val_phase,
num_workers=1, augment_data=False,
shuffle=True, repeat=False,
batch_size=1, limit_numpoints=False)
self.test_data_loader = initialize_data_loader(
DatasetClass, config, phase=config.test.test_phase,
num_workers=config.data.num_workers, augment_data=False,
shuffle=False, repeat=False,
batch_size=config.data.test_batch_size, limit_numpoints=False)
# Model initialization
logging.info('===> Building model')
num_in_channel = self.train_data_loader.dataset.NUM_IN_CHANNEL
num_labels = self.train_data_loader.dataset.NUM_LABELS
NetClass = load_model(config.net.model)
model = NetClass(num_in_channel, num_labels, config)
logging.info('===> Number of trainable parameters: {}: {}'.format(NetClass.__name__, count_parameters(model)))
logging.info(model)
# Load weights if specified by the parameter.
if config.net.weights != '':
logging.info('===> Loading weights: ' + config.net.weights)
state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu'))
matched_weights = load_state_with_same_shape(model, state['state_dict'])
model_dict = model.state_dict()
model_dict.update(matched_weights)
model.load_state_dict(model_dict)
model = model.cuda()
if config.misc.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model, device_ids=[self.cur_device],
output_device=self.cur_device,
broadcast_buffers=False
)
self.config = config
self.model = model
if self.is_master:
self.writer = SummaryWriter(log_dir='tensorboard')
self.optimizer = initialize_optimizer(model.parameters(), config.optimizer)
self.scheduler = initialize_scheduler(self.optimizer, config.optimizer)
self.criterion = nn.CrossEntropyLoss(ignore_index=config.data.ignore_label)
checkpoint_fn = 'weights/weights.pth'
self.best_val_miou, self.best_val_miou_iter = -1,1
self.best_val_mAP, self.best_val_mAP_iter = -1,1
self.curr_iter, self.epoch, self.is_training = 1, 1, True
if os.path.isfile(checkpoint_fn):
logging.info("=> loading checkpoint '{}'".format(checkpoint_fn))
state = torch.load(checkpoint_fn, map_location=lambda s, l: default_restore_location(s, 'cpu'))
self.load_state(state['state_dict'])
self.curr_iter = state['iteration'] + 1
self.epoch = state['epoch']
self.scheduler = initialize_scheduler(self.optimizer, config.optimizer, last_step=self.curr_iter)
self.optimizer.load_state_dict(state['optimizer'])
if 'best_val_miou' in state:
self.best_val_miou = state['best_val_miou']
if 'best_val_mAP' in state:
self.best_val_mAP = state['best_val_mAP']
logging.info("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_fn, state['epoch']))
else:
logging.info("=> no weights.pth")
def setup_logging(self):
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.WARN)
if self.is_master:
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
def load_state(self, state):
if get_world_size() > 1:
_model = self.model.module
else:
_model = self.model
_model.load_state_dict(state)
def set_seed(self):
# Set seed based on config.misc.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.config.misc.seed + self.curr_iter
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def test(self):
return test_(self.model, self.test_data_loader, self.config)
def validate(self):
val_loss, val_score, _, val_miou, val_mAP = test_(self.model, self.val_data_loader, self.config)
self.writer.add_scalar('val/miou', val_miou, self.curr_iter)
self.writer.add_scalar('val/loss', val_loss, self.curr_iter)
self.writer.add_scalar('val/precision_at_1', val_score, self.curr_iter)
self.writer.add_scalar('val/[email protected]', val_mAP, self.curr_iter)
if val_miou > self.best_val_miou:
self.best_val_miou = val_miou
self.best_val_miou_iter = self.curr_iter
checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config,
self.best_val_miou, self.best_val_mAP, "miou")
logging.info("Current best mIoU: {:.3f} at iter {}".format(self.best_val_miou, self.best_val_miou_iter))
if val_mAP > self.best_val_mAP:
self.best_val_mAP = val_mAP
self.best_val_mAP_iter = self.curr_iter
checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config,
self.best_val_miou, self.best_val_mAP, "mAP")
logging.info("Current best [email protected]: {:.3f} at iter {}".format(self.best_val_mAP, self.best_val_mAP_iter))
checkpoint(self.model, self.optimizer, self.epoch, self.curr_iter, self.config,
self.best_val_miou, self.best_val_mAP)
def train(self):
# Set up the train flag for batch normalization
self.model.train()
# Configuration
data_timer, iter_timer = Timer(), Timer()
fw_timer, bw_timer, ddp_timer = Timer(), Timer(), Timer()
data_time_avg, iter_time_avg = AverageMeter(), AverageMeter()
fw_time_avg, bw_time_avg, ddp_time_avg = AverageMeter(), AverageMeter(), AverageMeter()
scores = AverageMeter()
losses = {
'semantic_loss': AverageMeter(),
'offset_dir_loss': AverageMeter(),
'offset_norm_loss': AverageMeter(),
'total_loss': AverageMeter()
}
# Train the network
logging.info('===> Start training on {} GPUs, batch-size={}'.format(
get_world_size(), self.config.data.batch_size))
data_iter = self.train_data_loader.__iter__() # (distributed) infinite sampler
while self.is_training:
for _ in range(len(self.train_data_loader) // self.config.optimizer.iter_size):
self.optimizer.zero_grad()
data_time, batch_score = 0, 0
batch_losses = {
'semantic_loss': 0.0,
'offset_dir_loss': 0.0,
'offset_norm_loss': 0.0,
'total_loss': 0.0}
iter_timer.tic()
# set random seed for every iteration for reproducibility
self.set_seed()
for sub_iter in range(self.config.optimizer.iter_size):
# Get training data
data_timer.tic()
if self.config.data.return_transformation:
coords, input, target, instances, _ = data_iter.next()
else:
coords, input, target, instances = data_iter.next()
# Preprocess input
color = input[:, :3].int()
if self.config.augmentation.normalize_color:
input[:, :3] = input[:, :3] / 255. - 0.5
sinput = SparseTensor(input, coords).to(self.cur_device)
data_time += data_timer.toc(False)
# Feed forward
fw_timer.tic()
inputs = (sinput,)
pt_offsets, soutput, _ = self.model(*inputs)
# The output of the network is not sorted
target = target.long().to(self.cur_device)
semantic_loss = self.criterion(soutput.F, target.long())
total_loss = semantic_loss
#-----------------offset loss----------------------
## pt_offsets: (N, 3), float, cuda
## coords: (N, 3), float32
## centers: (N, 3), float32 tensor
## instance_ids: (N), long
centers = np.concatenate([instance['center'] for instance in instances])
instance_ids = np.concatenate([instance['ids'] for instance in instances])
centers = torch.from_numpy(centers).cuda()
instance_ids = torch.from_numpy(instance_ids).cuda().long()
gt_offsets = centers - coords[:,1:].cuda() # (N, 3)
gt_offsets *= self.train_data_loader.dataset.VOXEL_SIZE
pt_diff = pt_offsets.F - gt_offsets # (N, 3)
pt_dist = torch.sum(torch.abs(pt_diff), dim=-1) # (N)
valid = (instance_ids != -1).float()
offset_norm_loss = torch.sum(pt_dist * valid) / (torch.sum(valid) + 1e-6)
gt_offsets_norm = torch.norm(gt_offsets, p=2, dim=1) # (N), float
gt_offsets_ = gt_offsets / (gt_offsets_norm.unsqueeze(-1) + 1e-8)
pt_offsets_norm = torch.norm(pt_offsets.F, p=2, dim=1)
pt_offsets_ = pt_offsets.F / (pt_offsets_norm.unsqueeze(-1) + 1e-8)
direction_diff = - (gt_offsets_ * pt_offsets_).sum(-1) # (N)
offset_dir_loss = torch.sum(direction_diff * valid) / (torch.sum(valid) + 1e-6)
total_loss += offset_norm_loss + offset_dir_loss
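# offset_norm_loss is an L1 distance between predicted and ground-truth offsets and
# offset_dir_loss is a negative cosine similarity between their directions; both are
# averaged only over points with a valid instance id (instance_ids != -1).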
# Compute and accumulate gradient
total_loss /= self.config.optimizer.iter_size
pred = get_prediction(self.train_data_loader.dataset, soutput.F, target)
score = precision_at_one(pred, target)
# bp the loss
fw_timer.toc(False)
bw_timer.tic()
total_loss.backward()
bw_timer.toc(False)
# gather information
logging_output = {'total_loss': total_loss.item(), 'semantic_loss': semantic_loss.item(), 'score': score / self.config.optimizer.iter_size}
logging_output['offset_dir_loss'] = offset_dir_loss.item()
logging_output['offset_norm_loss'] = offset_norm_loss.item()
ddp_timer.tic()
if self.config.misc.num_gpus > 1:
logging_output = all_gather(logging_output)
logging_output = {w: np.mean([
a[w] for a in logging_output]
) for w in logging_output[0]}
batch_losses['total_loss'] += logging_output['total_loss']
batch_losses['semantic_loss'] += logging_output['semantic_loss']
batch_losses['offset_dir_loss'] += logging_output['offset_dir_loss']
batch_losses['offset_norm_loss'] += logging_output['offset_norm_loss']
batch_score += logging_output['score']
ddp_timer.toc(False)
# Update number of steps
self.optimizer.step()
self.scheduler.step()
data_time_avg.update(data_time)
iter_time_avg.update(iter_timer.toc(False))
fw_time_avg.update(fw_timer.diff)
bw_time_avg.update(bw_timer.diff)
ddp_time_avg.update(ddp_timer.diff)
losses['total_loss'].update(batch_losses['total_loss'], target.size(0))
losses['semantic_loss'].update(batch_losses['semantic_loss'], target.size(0))
losses['offset_dir_loss'].update(batch_losses['offset_dir_loss'], target.size(0))
losses['offset_norm_loss'].update(batch_losses['offset_norm_loss'], target.size(0))
scores.update(batch_score, target.size(0))
if self.curr_iter >= self.config.optimizer.max_iter:
self.is_training = False
break
if self.curr_iter % self.config.train.stat_freq == 0 or self.curr_iter == 1:
lrs = ', '.join(['{:.3e}'.format(x) for x in self.scheduler.get_last_lr()])
debug_str = "===> Epoch[{}]({}/{}): Loss {:.4f}, Sem {:.4f}, dir {:.4f}, norm {:.4f}\tLR: {}\t".format(
self.epoch, self.curr_iter, len(self.train_data_loader) // self.config.optimizer.iter_size,
losses['total_loss'].avg, losses['semantic_loss'].avg,
losses['offset_dir_loss'].avg, losses['offset_norm_loss'].avg, lrs)
debug_str += "Score {:.3f}\tData time: {:.4f}, Forward time: {:.4f}, Backward time: {:.4f}, DDP time: {:.4f}, Total iter time: {:.4f}".format(
scores.avg, data_time_avg.avg, fw_time_avg.avg, bw_time_avg.avg, ddp_time_avg.avg, iter_time_avg.avg)
logging.info(debug_str)
# Reset timers
data_time_avg.reset()
iter_time_avg.reset()
# Write logs
if self.is_master:
self.writer.add_scalar('train/loss', losses['total_loss'].avg, self.curr_iter)
self.writer.add_scalar('train/semantic_loss', losses['semantic_loss'].avg, self.curr_iter)
self.writer.add_scalar('train/offset_dir_loss', losses['offset_dir_loss'].avg, self.curr_iter)
self.writer.add_scalar('train/offset_norm_loss', losses['offset_norm_loss'].avg, self.curr_iter)
self.writer.add_scalar('train/precision_at_1', scores.avg, self.curr_iter)
self.writer.add_scalar('train/learning_rate', self.scheduler.get_last_lr()[0], self.curr_iter)
# clear loss
losses['total_loss'].reset()
losses['semantic_loss'].reset()
losses['offset_dir_loss'].reset()
losses['offset_norm_loss'].reset()
scores.reset()
# Validation
if self.curr_iter % self.config.train.val_freq == 0 and self.is_master:
self.validate()
self.model.train()
if self.curr_iter % self.config.train.empty_cache_freq == 0:
# Clear cache
torch.cuda.empty_cache()
# End of iteration
self.curr_iter += 1
self.epoch += 1
# Explicit memory cleanup
if hasattr(data_iter, 'cleanup'):
data_iter.cleanup()
# Save the final model
if self.is_master:
self.validate()
| ContrastiveSceneContexts-main | downstream/insseg/lib/ddp_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144),
]
def write_triangle_mesh(vertices, colors, faces, outputFile):
mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False)
mesh.export(outputFile)
def read_triangle_mesh(filename):
mesh = trimesh.load_mesh(filename, process=False)
if isinstance(mesh, trimesh.PointCloud):
vertices = mesh.vertices
colors = mesh.colors
faces = None
elif isinstance(mesh, trimesh.Trimesh):
vertices = mesh.vertices
colors = mesh.visual.vertex_colors
faces = mesh.faces
return vertices, colors, faces | ContrastiveSceneContexts-main | downstream/insseg/lib/io3d.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import torch
import signal
import pickle
import threading
import functools
import traceback
import torch.nn as nn
import torch.distributed as dist
import multiprocessing as mp
"""Multiprocessing error handler."""
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, sig_num, stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
"""Multiprocessing helpers."""
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
"""Runs a function from a child process."""
try:
# Initialize the process group
init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
"""Distributed helpers."""
def is_master_proc(num_gpus):
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
    When training using a single GPU, there is only one training process,
    which is considered the master process.
"""
return num_gpus == 1 or torch.distributed.get_rank() == 0
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather_differentiable(tensor):
"""
Run differentiable gather function for SparseConv features with variable number of points.
tensor: [num_points, feature_dim]
"""
world_size = get_world_size()
if world_size == 1:
return [tensor]
num_points, f_dim = tensor.size()
local_np = torch.LongTensor([num_points]).to("cuda")
np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(np_list, local_np)
np_list = [int(np.item()) for np in np_list]
max_np = max(np_list)
tensor_list = []
for _ in np_list:
tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda"))
if local_np != max_np:
padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float()
tensor = torch.cat((tensor, padding), dim=0)
assert tensor.size() == (max_np, f_dim)
dist.all_gather(tensor_list, tensor)
data_list = []
for gather_np, gather_tensor in zip(np_list, tensor_list):
gather_tensor = gather_tensor[:gather_np]
assert gather_tensor.size() == (gather_np, f_dim)
data_list.append(gather_tensor)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
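# Illustrative sketch (not part of the original source): since torch.distributed
# all_gather needs equally sized tensors, the helper above pickles arbitrary
# Python objects, pads the byte tensors to a common length, gathers them, and
# unpickles only the valid prefix from every rank. A hypothetical use:
#
#   local_stats = {'rank': get_rank(), 'num_points': 12345}
#   gathered = all_gather(local_stats)   # list with one dict per rank
#   if get_rank() == 0:
#       total = sum(s['num_points'] for s in gathered)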
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://{}:{}".format("localhost", "10001"),
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
| ContrastiveSceneContexts-main | downstream/insseg/lib/distributed.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from numpy.linalg import matrix_rank, inv
from plyfile import PlyData, PlyElement
import pandas as pd
COLOR_MAP_RGB = (
(241, 255, 82),
(102, 168, 226),
(0, 255, 0),
(113, 143, 65),
(89, 173, 163),
(254, 158, 137),
(190, 123, 75),
(100, 22, 116),
(0, 18, 141),
(84, 84, 84),
(85, 116, 127),
(255, 31, 33),
(228, 228, 228),
(0, 255, 0),
(70, 145, 150),
(237, 239, 94),
)
IGNORE_COLOR = (0, 0, 0)
def read_plyfile(filepath):
"""Read ply file and return it as numpy array. Returns None if emtpy."""
with open(filepath, 'rb') as f:
plydata = PlyData.read(f)
if plydata.elements:
return pd.DataFrame(plydata.elements[0].data).values
def save_point_cloud(points_3d, filename, binary=True, with_label=False, verbose=True):
"""Save an RGB point cloud as a PLY file.
Args:
    points_3d: Nx6 matrix where points_3d[:, :3] are the XYZ coordinates and points_3d[:, 3:] are
      the RGB values. If Nx3 matrix, save all points with [128, 128, 128] (gray) color.
"""
assert points_3d.ndim == 2
if with_label:
assert points_3d.shape[1] == 7
python_types = (float, float, float, int, int, int, int)
npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
('blue', 'u1'), ('label', 'u1')]
else:
if points_3d.shape[1] == 3:
gray_concat = np.tile(np.array([128], dtype=np.uint8), (points_3d.shape[0], 3))
points_3d = np.hstack((points_3d, gray_concat))
assert points_3d.shape[1] == 6
python_types = (float, float, float, int, int, int)
npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
('blue', 'u1')]
if binary is True:
# Format into NumPy structured array
vertices = []
for row_idx in range(points_3d.shape[0]):
cur_point = points_3d[row_idx]
vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point)))
vertices_array = np.array(vertices, dtype=npy_types)
el = PlyElement.describe(vertices_array, 'vertex')
# Write
PlyData([el]).write(filename)
else:
# PlyData([el], text=True).write(filename)
with open(filename, 'w') as f:
f.write('ply\n'
'format ascii 1.0\n'
'element vertex %d\n'
'property float x\n'
'property float y\n'
'property float z\n'
'property uchar red\n'
'property uchar green\n'
'property uchar blue\n'
'property uchar alpha\n'
'end_header\n' % points_3d.shape[0])
for row_idx in range(points_3d.shape[0]):
X, Y, Z, R, G, B = points_3d[row_idx]
f.write('%f %f %f %d %d %d 0\n' % (X, Y, Z, R, G, B))
if verbose is True:
print('Saved point cloud to: %s' % filename)
class Camera(object):
def __init__(self, intrinsics):
self._intrinsics = intrinsics
self._camera_matrix = self.build_camera_matrix(self.intrinsics)
self._K_inv = inv(self.camera_matrix)
@staticmethod
def build_camera_matrix(intrinsics):
"""Build the 3x3 camera matrix K using the given intrinsics.
Equation 6.10 from HZ.
"""
f = intrinsics['focal_length']
pp_x = intrinsics['pp_x']
pp_y = intrinsics['pp_y']
K = np.array([[f, 0, pp_x], [0, f, pp_y], [0, 0, 1]], dtype=np.float32)
# K[:, 0] *= -1. # Step 1 of Kyle
assert matrix_rank(K) == 3
return K
@staticmethod
def extrinsics2RT(extrinsics):
"""Convert extrinsics matrix to separate rotation matrix R and translation vector T.
"""
assert extrinsics.shape == (4, 4)
R = extrinsics[:3, :3]
T = extrinsics[3, :3]
R = np.copy(R)
T = np.copy(T)
T = T.reshape(3, 1)
R[0, :] *= -1. # Step 1 of Kyle
T *= 100. # Convert from m to cm
return R, T
def project(self, points_3d, extrinsics=None):
"""Project a 3D point in camera coordinates into the camera/image plane.
Args:
point_3d:
"""
if extrinsics is not None: # Map points to camera coordinates
points_3d = self.world2camera(extrinsics, points_3d)
# TODO: Make sure to handle homogeneous AND non-homogeneous coordinate points
# TODO: Consider handling a set of points
raise NotImplementedError
def backproject(self,
depth_map,
labels=None,
max_depth=None,
max_height=None,
min_height=None,
rgb_img=None,
extrinsics=None,
prune=True):
"""Backproject a depth map into 3D points (camera coordinate system). Attach color if RGB image
    is provided, otherwise use white [255 255 255] color.
Does not show points at Z = 0 or maximum Z = 65535 depth.
Args:
labels: Tensor with the same shape as depth map (but can be 1-channel or 3-channel).
max_depth: Maximum depth in cm. All pts with depth greater than max_depth will be ignored.
max_height: Maximum height in cm. All pts with height greater than max_height will be ignored.
Returns:
points_3d: Numpy array of size Nx3 (XYZ) or Nx6 (XYZRGB).
"""
if labels is not None:
assert depth_map.shape[:2] == labels.shape[:2]
if (labels.ndim == 2) or ((labels.ndim == 3) and (labels.shape[2] == 1)):
n_label_channels = 1
elif (labels.ndim == 3) and (labels.shape[2] == 3):
n_label_channels = 3
if rgb_img is not None:
assert depth_map.shape[:2] == rgb_img.shape[:2]
else:
rgb_img = np.ones_like(depth_map, dtype=np.uint8) * 255
# Convert from 1-channel to 3-channel
if (rgb_img.ndim == 3) and (rgb_img.shape[2] == 1):
rgb_img = np.tile(rgb_img, [1, 1, 3])
# Convert depth map to single channel if it is multichannel
if (depth_map.ndim == 3) and depth_map.shape[2] == 3:
depth_map = np.squeeze(depth_map[:, :, 0])
depth_map = depth_map.astype(np.float32)
# Get image dimensions
H, W = depth_map.shape
# Create meshgrid (pixel coordinates)
Z = depth_map
A, B = np.meshgrid(range(W), range(H))
ones = np.ones_like(A)
grid = np.concatenate((A[:, :, np.newaxis], B[:, :, np.newaxis], ones[:, :, np.newaxis]),
axis=2)
grid = grid.astype(np.float32) * Z[:, :, np.newaxis]
# Nx3 where each row is (a*Z, b*Z, Z)
grid_flattened = grid.reshape((-1, 3))
grid_flattened = grid_flattened.T # 3xN where each col is (a*Z, b*Z, Z)
prod = np.dot(self.K_inv, grid_flattened)
XYZ = np.concatenate((prod[:2, :].T, Z.flatten()[:, np.newaxis]), axis=1) # Nx3
XYZRGB = np.hstack((XYZ, rgb_img.reshape((-1, 3))))
points_3d = XYZRGB
if labels is not None:
labels_reshaped = labels.reshape((-1, n_label_channels))
# Prune points
if prune is True:
valid = []
for idx in range(points_3d.shape[0]):
cur_y = points_3d[idx, 1]
cur_z = points_3d[idx, 2]
if (cur_z == 0) or (cur_z == 65535): # Don't show things at 0 distance or max distance
continue
elif (max_depth is not None) and (cur_z > max_depth):
continue
elif (max_height is not None) and (cur_y > max_height):
continue
elif (min_height is not None) and (cur_y < min_height):
continue
else:
valid.append(idx)
points_3d = points_3d[np.asarray(valid)]
if labels is not None:
labels_reshaped = labels_reshaped[np.asarray(valid)]
if extrinsics is not None:
points_3d = self.camera2world(extrinsics, points_3d)
if labels is not None:
points_3d_labels = np.hstack((points_3d[:, :3], labels_reshaped))
return points_3d, points_3d_labels
else:
return points_3d
@staticmethod
def _camera2world_transform(no_rgb_points_3d, R, T):
points_3d_world = (np.dot(R.T, no_rgb_points_3d.T) - T).T # Nx3
return points_3d_world
@staticmethod
def _world2camera_transform(no_rgb_points_3d, R, T):
points_3d_world = (np.dot(R, no_rgb_points_3d.T + T)).T # Nx3
return points_3d_world
def _transform_points(self, points_3d, extrinsics, transform):
"""Base/wrapper method for transforming points using R and T.
"""
assert points_3d.ndim == 2
orig_points_3d = points_3d
points_3d = np.copy(orig_points_3d)
if points_3d.shape[1] == 6: # XYZRGB
points_3d = points_3d[:, :3]
elif points_3d.shape[1] == 3: # XYZ
points_3d = points_3d
else:
raise ValueError('3D points need to be XYZ or XYZRGB.')
R, T = self.extrinsics2RT(extrinsics)
points_3d_world = transform(points_3d, R, T)
# Add color again (if appropriate)
if orig_points_3d.shape[1] == 6: # XYZRGB
points_3d_world = np.hstack((points_3d_world, orig_points_3d[:, -3:]))
return points_3d_world
def camera2world(self, extrinsics, points_3d):
"""Transform from camera coordinates (3D) to world coordinates (3D).
Args:
points_3d: Nx3 or Nx6 matrix of N points with XYZ or XYZRGB values.
"""
return self._transform_points(points_3d, extrinsics, self._camera2world_transform)
def world2camera(self, extrinsics, points_3d):
"""Transform from world coordinates (3D) to camera coordinates (3D).
"""
return self._transform_points(points_3d, extrinsics, self._world2camera_transform)
@property
def intrinsics(self):
return self._intrinsics
@property
def camera_matrix(self):
return self._camera_matrix
@property
def K_inv(self):
return self._K_inv
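# Illustrative sketch (not part of the original source): the Camera class is built
# from a small intrinsics dict and can back-project a depth map into a colored
# point cloud. The numbers, depth_map and rgb below are assumptions.
#
#   cam = Camera({'focal_length': 570.3, 'pp_x': 320.0, 'pp_y': 240.0})
#   pts = cam.backproject(depth_map, rgb_img=rgb, max_depth=500)   # (N,6) XYZRGB, depth in cm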
def colorize_pointcloud(xyz, label, ignore_label=255):
assert label[label != ignore_label].max() < len(COLOR_MAP_RGB), 'Not enough colors.'
label_rgb = np.array([COLOR_MAP_RGB[i] if i != ignore_label else IGNORE_COLOR for i in label])
return np.hstack((xyz, label_rgb))
class PlyWriter(object):
POINTCLOUD_DTYPE = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
('blue', 'u1')]
@classmethod
def read_txt(cls, txtfile):
# Read txt file and parse its content.
with open(txtfile) as f:
pointcloud = [l.split() for l in f]
# Load point cloud to named numpy array.
pointcloud = np.array(pointcloud).astype(np.float32)
assert pointcloud.shape[1] == 6
xyz = pointcloud[:, :3].astype(np.float32)
rgb = pointcloud[:, 3:].astype(np.uint8)
return xyz, rgb
@staticmethod
def write_ply(array, filepath):
ply_el = PlyElement.describe(array, 'vertex')
target_path, _ = os.path.split(filepath)
if target_path != '' and not os.path.exists(target_path):
os.makedirs(target_path)
PlyData([ply_el]).write(filepath)
@classmethod
def write_vertex_only_ply(cls, vertices, filepath):
# assume that points are N x 3 np array for vertex locations
color = 255 * np.ones((len(vertices), 3))
pc_points = np.array([tuple(p) for p in np.concatenate((vertices, color), axis=1)],
dtype=cls.POINTCLOUD_DTYPE)
cls.write_ply(pc_points, filepath)
@classmethod
def write_ply_vert_color(cls, vertices, colors, filepath):
# assume that points are N x 3 np array for vertex locations
pc_points = np.array([tuple(p) for p in np.concatenate((vertices, colors), axis=1)],
dtype=cls.POINTCLOUD_DTYPE)
cls.write_ply(pc_points, filepath)
@classmethod
def concat_label(cls, target, xyz, label):
subpointcloud = np.concatenate([xyz, label], axis=1)
subpointcloud = np.array([tuple(l) for l in subpointcloud], dtype=cls.POINTCLOUD_DTYPE)
return np.concatenate([target, subpointcloud], axis=0)
| ContrastiveSceneContexts-main | downstream/insseg/lib/pc_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import errno
import time
import torch
import numpy as np
from omegaconf import OmegaConf
from lib.pc_utils import colorize_pointcloud, save_point_cloud
from lib.distributed import get_world_size
def load_state_with_same_shape(model, weights):
# weights['conv1.kernel'] = weights['conv1.kernel'].repeat([1,3,1])/3.0
model_state = model.state_dict()
if list(weights.keys())[0].startswith('module.'):
logging.info("Loading multigpu weights with module. prefix...")
weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith('encoder.'):
logging.info("Loading multigpu weights with encoder. prefix...")
weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()}
# print(weights.items())
# print("===================")
# print("===================")
# print("===================")
# print("===================")
# print("===================")
# print(model_state)
filtered_weights = {
k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()
}
logging.info("Loading weights:" + ', '.join(filtered_weights.keys()))
return filtered_weights
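# Illustrative sketch (not part of the original source): typical use is to load a
# pretrained checkpoint and keep only the parameters whose shapes match the
# current model before calling load_state_dict with strict=False. The checkpoint
# path is an assumption.
#
#   state = torch.load('weights.pth', map_location='cpu')
#   matched = load_state_with_same_shape(model, state['state_dict'])
#   model.load_state_dict(matched, strict=False)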
def checkpoint(model, optimizer, epoch, iteration, config, best_val_miou=None, best_val_mAP=None, postfix=None):
mkdir_p('weights')
filename = f"checkpoint_{config.net.model}_iter{iteration}.pth"
if config.train.overwrite_weights:
filename = f"checkpoint_{config.net.model}.pth"
if postfix is not None:
filename = f"checkpoint_{config.net.model}_{postfix}.pth"
checkpoint_file = 'weights/' + filename
_model = model.module if get_world_size() > 1 else model
state = {
'iteration': iteration,
'epoch': epoch,
'arch': config.net.model,
'state_dict': _model.state_dict(),
'optimizer': optimizer.state_dict()
}
if best_val_miou is not None:
state['best_val_miou'] = best_val_miou
state['best_val_iter'] = iteration
if best_val_mAP is not None:
state['best_val_mAP'] = best_val_mAP
state['best_val_iter'] = iteration
# save config
OmegaConf.save(config, 'config.yaml')
torch.save(state, checkpoint_file)
logging.info(f"Checkpoint saved to {checkpoint_file}")
if postfix == None:
# Delete symlink if it exists
if os.path.exists('weights/weights.pth'):
os.remove('weights/weights.pth')
# Create symlink
os.system('ln -s {} weights/weights.pth'.format(filename))
def precision_at_one(pred, target, ignore_label=255):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != ignore_label]
correct = correct.view(-1)
if correct.nelement():
return correct.float().sum(0).mul(100.0 / correct.size(0)).item()
else:
return float('nan')
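# Worked example (not part of the original source): precision_at_one compares the
# predictions with the targets and ignores entries equal to ignore_label:
#   pred   = torch.tensor([1, 2, 2, 0])
#   target = torch.tensor([1, 2, 0, 255])
#   precision_at_one(pred, target)   # 2 correct out of 3 valid entries -> ~66.67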
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(n * label[k].astype(int) + pred[k], minlength=n**2).reshape(n, n)
def per_class_iu(hist):
with np.errstate(divide='ignore', invalid='ignore'):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
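# Worked example (not part of the original source) for the two helpers above, with
# n = 2 classes: fast_hist accumulates an n x n confusion matrix (rows index the
# ground-truth label, columns the prediction) and per_class_iu converts it into
# per-class intersection-over-union.
#
#   pred  = np.array([0, 0, 1, 1])
#   label = np.array([0, 1, 1, 1])
#   hist  = fast_hist(pred, label, 2)   # [[1, 0], [1, 2]]
#   per_class_iu(hist)                  # array([0.5, 0.66666667])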
class WithTimer(object):
"""Timer for with statement."""
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
out_str = 'Elapsed: %s' % (time.time() - self.tstart)
if self.name:
      logging.info(f'[{self.name}]')
logging.info(out_str)
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
    self.average_time = 0.
def tic(self):
    # using time.time instead of time.clock because time.clock
    # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
class ExpTimer(Timer):
""" Exponential Moving Average Timer """
def __init__(self, alpha=0.5):
super(ExpTimer, self).__init__()
self.alpha = alpha
def toc(self):
self.diff = time.time() - self.start_time
self.average_time = self.alpha * self.diff + \
(1 - self.alpha) * self.average_time
return self.average_time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
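# Illustrative sketch (not part of the original source): AverageMeter keeps a
# running (optionally weighted) mean, e.g. of per-batch losses.
#
#   meter = AverageMeter()
#   meter.update(0.8, n=32)   # batch of 32 samples with mean loss 0.8
#   meter.update(0.6, n=16)   # batch of 16 samples with mean loss 0.6
#   meter.avg                 # (0.8*32 + 0.6*16) / 48 ~= 0.733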
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def read_txt(path):
"""Read txt file into lines.
"""
with open(path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
return lines
def debug_on():
import sys
import pdb
import functools
import traceback
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception:
info = sys.exc_info()
traceback.print_exception(*info)
pdb.post_mortem(info[2])
return wrapper
return decorator
def get_prediction(dataset, output, target):
return output.max(1)[1]
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_torch_device(is_cuda):
return torch.device('cuda' if is_cuda else 'cpu')
class HashTimeBatch(object):
def __init__(self, prime=5279):
self.prime = prime
def __call__(self, time, batch):
return self.hash(time, batch)
def hash(self, time, batch):
return self.prime * batch + time
def dehash(self, key):
time = key % self.prime
    batch = key // self.prime
return time, batch
def save_rotation_pred(iteration, pred, dataset, save_pred_dir):
"""Save prediction results in original pointcloud scale."""
decode_label_map = {}
for k, v in dataset.label_map.items():
decode_label_map[v] = k
  pred = np.array([decode_label_map[x] for x in pred], dtype=int)
out_rotation_txt = dataset.get_output_id(iteration) + '.txt'
out_rotation_path = save_pred_dir + '/' + out_rotation_txt
np.savetxt(out_rotation_path, pred, fmt='%i')
def visualize_results(coords, input, target, upsampled_pred, config, iteration):
# Get filter for valid predictions in the first batch.
target_batch = coords[:, 3].numpy() == 0
input_xyz = coords[:, :3].numpy()
target_valid = target.numpy() != 255
target_pred = np.logical_and(target_batch, target_valid)
target_nonpred = np.logical_and(target_batch, ~target_valid)
ptc_nonpred = np.hstack((input_xyz[target_nonpred], np.zeros((np.sum(target_nonpred), 3))))
# Unwrap file index if tested with rotation.
file_iter = iteration
  if config.test.test_rotation >= 1:
file_iter = iteration // config.test.test_rotation
# Create directory to save visualization results.
os.makedirs(config.test.visualize_path, exist_ok=True)
# Label visualization in RGB.
xyzlabel = colorize_pointcloud(input_xyz[target_pred], upsampled_pred[target_pred])
xyzlabel = np.vstack((xyzlabel, ptc_nonpred))
filename = '_'.join([config.dataset, config.model, 'pred', '%04d.ply' % file_iter])
save_point_cloud(xyzlabel, os.path.join(config.test.visualize_path, filename), verbose=False)
# RGB input values visualization.
xyzrgb = np.hstack((input_xyz[target_batch], input[:, :3].cpu().numpy()[target_batch]))
filename = '_'.join([config.dataset, config.model, 'rgb', '%04d.ply' % file_iter])
save_point_cloud(xyzrgb, os.path.join(config.test.visualize_path, filename), verbose=False)
# Ground-truth visualization in RGB.
xyzgt = colorize_pointcloud(input_xyz[target_pred], target.numpy()[target_pred])
xyzgt = np.vstack((xyzgt, ptc_nonpred))
filename = '_'.join([config.dataset, config.model, 'gt', '%04d.ply' % file_iter])
save_point_cloud(xyzgt, os.path.join(config.test.visualize_path, filename), verbose=False)
def permute_pointcloud(input_coords, pointcloud, transformation, label_map,
voxel_output, voxel_pred):
"""Get permutation from pointcloud to input voxel coords."""
def _hash_coords(coords, coords_min, coords_dim):
return np.ravel_multi_index((coords - coords_min).T, coords_dim)
# Validate input.
input_batch_size = input_coords[:, -1].max().item()
pointcloud_batch_size = pointcloud[:, -1].max().int().item()
transformation_batch_size = transformation[:, -1].max().int().item()
assert input_batch_size == pointcloud_batch_size == transformation_batch_size
pointcloud_permutation, pointcloud_target = [], []
# Process each batch.
for i in range(input_batch_size + 1):
# Filter batch from the data.
input_coords_mask_b = input_coords[:, -1] == i
input_coords_b = (input_coords[input_coords_mask_b])[:, :-1].numpy()
pointcloud_b = pointcloud[pointcloud[:, -1] == i, :-1].numpy()
transformation_b = transformation[i, :-1].reshape(4, 4).numpy()
# Transform original pointcloud to voxel space.
original_coords1 = np.hstack((pointcloud_b[:, :3], np.ones((pointcloud_b.shape[0], 1))))
original_vcoords = np.floor(original_coords1 @ transformation_b.T)[:, :3].astype(int)
# Hash input and voxel coordinates to flat coordinate.
vcoords_all = np.vstack((input_coords_b, original_vcoords))
vcoords_min = vcoords_all.min(0)
vcoords_dims = vcoords_all.max(0) - vcoords_all.min(0) + 1
input_coords_key = _hash_coords(input_coords_b, vcoords_min, vcoords_dims)
original_vcoords_key = _hash_coords(original_vcoords, vcoords_min, vcoords_dims)
# Query voxel predictions from original pointcloud.
key_to_idx = dict(zip(input_coords_key, range(len(input_coords_key))))
pointcloud_permutation.append(
np.array([key_to_idx.get(i, -1) for i in original_vcoords_key]))
pointcloud_target.append(pointcloud_b[:, -1].astype(int))
pointcloud_permutation = np.concatenate(pointcloud_permutation)
# Prepare pointcloud permutation array.
pointcloud_permutation = torch.from_numpy(pointcloud_permutation)
permutation_mask = pointcloud_permutation >= 0
permutation_valid = pointcloud_permutation[permutation_mask]
  # Permute voxel output to pointcloud.
pointcloud_output = torch.zeros(pointcloud.shape[0], voxel_output.shape[1]).to(voxel_output)
pointcloud_output[permutation_mask] = voxel_output[permutation_valid]
  # Permute voxel prediction to pointcloud.
  # NOTE: Invalid points (points found in pointcloud but not in the voxel) are mapped to 0.
pointcloud_pred = torch.ones(pointcloud.shape[0]).int().to(voxel_pred) * 0
pointcloud_pred[permutation_mask] = voxel_pred[permutation_valid]
# Map pointcloud target to respect dataset IGNORE_LABELS
pointcloud_target = torch.from_numpy(
np.array([label_map[i] for i in np.concatenate(pointcloud_target)])).int()
return pointcloud_output, pointcloud_pred, pointcloud_target
| ContrastiveSceneContexts-main | downstream/insseg/lib/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from scipy.sparse import csr_matrix
import torch
class SparseMM(torch.autograd.Function):
"""
Sparse x dense matrix multiplication with autograd support.
Implementation by Soumith Chintala:
https://discuss.pytorch.org/t/
does-pytorch-support-autograd-on-sparse-matrix/6156/7
"""
  # Modern autograd Functions must use static methods with a ctx object;
  # call this via SparseMM.apply(matrix1, matrix2).
  @staticmethod
  def forward(ctx, matrix1, matrix2):
    ctx.save_for_backward(matrix1, matrix2)
    return torch.mm(matrix1, matrix2)
  @staticmethod
  def backward(ctx, grad_output):
    matrix1, matrix2 = ctx.saved_tensors
    grad_matrix1 = grad_matrix2 = None
    if ctx.needs_input_grad[0]:
      grad_matrix1 = torch.mm(grad_output, matrix2.t())
    if ctx.needs_input_grad[1]:
      grad_matrix2 = torch.mm(matrix1.t(), grad_output)
    return grad_matrix1, grad_matrix2
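# Illustrative sketch (not part of the original source): with the static-method
# autograd interface above, SparseMM is invoked through .apply. The tensors below
# are assumptions for the sake of the example.
#
#   i = torch.tensor([[0, 1, 2], [0, 1, 2]])
#   A = torch.sparse_coo_tensor(i, torch.ones(3), (3, 3))   # sparse 3x3 identity
#   B = torch.randn(3, 4, requires_grad=True)
#   C = SparseMM.apply(A, B)   # equals B, since A is the identity
#   C.sum().backward()         # gradients flow back into B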
def sparse_float_tensor(values, indices, size=None):
"""
Return a torch sparse matrix give values and indices (row_ind, col_ind).
If the size is an integer, return a square matrix with side size.
If the size is a torch.Size, use it to initialize the out tensor.
If none, the size is inferred.
"""
  indices = torch.stack(indices).long()
sargs = [indices, values.float()]
if size is not None:
# Use the provided size
if isinstance(size, int):
size = torch.Size((size, size))
sargs.append(size)
if values.is_cuda:
return torch.cuda.sparse.FloatTensor(*sargs)
else:
return torch.sparse.FloatTensor(*sargs)
def diags(values, size=None):
values = values.view(-1)
n = values.nelement()
size = torch.Size((n, n))
indices = (torch.arange(0, n), torch.arange(0, n))
return sparse_float_tensor(values, indices, size)
def sparse_to_csr_matrix(tensor):
tensor = tensor.cpu()
inds = tensor._indices().numpy()
vals = tensor._values().numpy()
return csr_matrix((vals, (inds[0], inds[1])), shape=[s for s in tensor.shape])
def csr_matrix_to_sparse(mat):
row_ind, col_ind = mat.nonzero()
return sparse_float_tensor(
torch.from_numpy(mat.data),
(torch.from_numpy(row_ind), torch.from_numpy(col_ind)),
size=torch.Size(mat.shape))
| ContrastiveSceneContexts-main | downstream/insseg/lib/math_functions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from MinkowskiEngine import MinkowskiGlobalPooling, MinkowskiBroadcastAddition, MinkowskiBroadcastMultiplication
class MinkowskiLayerNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, D=-1):
super(MinkowskiLayerNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.weight = nn.Parameter(torch.ones(1, num_features))
self.bias = nn.Parameter(torch.zeros(1, num_features))
self.mean_in = MinkowskiGlobalPooling(dimension=D)
self.glob_sum = MinkowskiBroadcastAddition(dimension=D)
self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D)
self.glob_mean = MinkowskiGlobalPooling(dimension=D)
self.glob_times = MinkowskiBroadcastMultiplication(dimension=D)
self.D = D
self.reset_parameters()
def __repr__(self):
s = f'(D={self.D})'
return self.__class__.__name__ + s
def reset_parameters(self):
self.weight.data.fill_(1)
self.bias.data.zero_()
def _check_input_dim(self, input):
if input.F.dim() != 2:
raise ValueError('expected 2D input (got {}D input)'.format(input.dim()))
def forward(self, x):
self._check_input_dim(x)
mean = self.mean_in(x).F.mean(-1, keepdim=True)
mean = mean + torch.zeros(mean.size(0), self.num_features).type_as(mean)
temp = self.glob_sum(x.F, -mean)**2
var = self.glob_mean(temp.data).mean(-1, keepdim=True)
var = var + torch.zeros(var.size(0), self.num_features).type_as(var)
instd = 1 / (var + self.eps).sqrt()
x = self.glob_times(self.glob_sum2(x, -mean), instd)
return x * self.weight + self.bias
class MinkowskiInstanceNorm(nn.Module):
def __init__(self, num_features, eps=1e-5, D=-1):
super(MinkowskiInstanceNorm, self).__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(1, num_features))
self.bias = nn.Parameter(torch.zeros(1, num_features))
self.mean_in = MinkowskiGlobalPooling(dimension=D)
self.glob_sum = MinkowskiBroadcastAddition(dimension=D)
self.glob_sum2 = MinkowskiBroadcastAddition(dimension=D)
self.glob_mean = MinkowskiGlobalPooling(dimension=D)
self.glob_times = MinkowskiBroadcastMultiplication(dimension=D)
self.D = D
self.reset_parameters()
def __repr__(self):
    s = f'(D={self.D})'
return self.__class__.__name__ + s
def reset_parameters(self):
self.weight.data.fill_(1)
self.bias.data.zero_()
def _check_input_dim(self, input):
if input.dim() != 2:
raise ValueError('expected 2D input (got {}D input)'.format(input.dim()))
def forward(self, x):
self._check_input_dim(x)
mean_in = self.mean_in(x)
temp = self.glob_sum(x, -mean_in)**2
var_in = self.glob_mean(temp.data)
instd_in = 1 / (var_in + self.eps).sqrt()
x = self.glob_times(self.glob_sum2(x, -mean_in), instd_in)
return x * self.weight + self.bias
| ContrastiveSceneContexts-main | downstream/insseg/lib/layers.py |
import os
import torch
import numpy as np
from torch.autograd import Function
import argparse
#from lib.datasets.scannet.datagen.export_ids_per_vertex import read_segmentation, write_triangle_mesh
#from lib.utils.io import read_triangle_mesh, create_color_palette, write_triangle_mesh
#from lib.utils.scannet_benchmark_utils import util_3d
import PG_OP
class BallQueryBatchP(Function):
@staticmethod
def forward(ctx, coords, batch_idxs, batch_offsets, radius, meanActive):
'''
:param ctx:
:param coords: (n, 3) float
:param batch_idxs: (n) int
:param batch_offsets: (B+1) int
:param radius: float
:param meanActive: int
:return: idx (nActive), int
:return: start_len (n, 2), int
'''
n = coords.size(0)
assert coords.is_contiguous() and coords.is_cuda
assert batch_idxs.is_contiguous() and batch_idxs.is_cuda
assert batch_offsets.is_contiguous() and batch_offsets.is_cuda
while True:
idx = torch.cuda.IntTensor(n * meanActive).zero_()
start_len = torch.cuda.IntTensor(n, 2).zero_()
nActive = PG_OP.ballquery_batch_p(coords, batch_idxs, batch_offsets, idx, start_len, n, meanActive, radius)
if nActive <= n * meanActive:
break
meanActive = int(nActive // n + 1)
idx = idx[:nActive]
return idx, start_len
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None
ballquery_batch_p = BallQueryBatchP.apply
class BFSCluster(Function):
@staticmethod
def forward(ctx, semantic_label, ball_query_idxs, start_len, threshold):
'''
:param ctx:
:param semantic_label: (N), int
:param ball_query_idxs: (nActive), int
:param start_len: (N, 2), int
:return: cluster_idxs: int (sumNPoint, 2), dim 0 for cluster_id, dim 1 for corresponding point idxs in N
:return: cluster_offsets: int (nCluster + 1)
'''
N = start_len.size(0)
assert semantic_label.is_contiguous()
assert ball_query_idxs.is_contiguous()
assert start_len.is_contiguous()
cluster_idxs = semantic_label.new()
cluster_offsets = semantic_label.new()
PG_OP.bfs_cluster(semantic_label, ball_query_idxs, start_len, cluster_idxs, cluster_offsets, N, threshold)
return cluster_idxs, cluster_offsets
@staticmethod
def backward(ctx, a=None):
return None
bfs_cluster = BFSCluster.apply
class Clustering:
def __init__(self, ignored_labels, class_mapping, thresh=0.03,
closed_points=300,
min_points=50,
propose_points=100,
score_func=torch.max) -> None:
self.ignored_labels = ignored_labels
self.thresh = thresh
self.closed_points = closed_points
self.min_points = min_points
self.class_mapping = class_mapping.cuda()
self.propose_points = propose_points
self.score_func=score_func
def cluster(self, vertices, scores):
labels = torch.max(scores, 1)[1] # (N) long, cuda
proposals_idx, proposals_offset = self.cluster_(vertices, labels.cuda())
## debug
#import ipdb; ipdb.set_trace()
#colors = np.array(create_color_palette())[labels.cpu()]
#write_triangle_mesh(vertices, colors, None, 'semantics.ply')
# scatter
proposals_pred = torch.zeros((proposals_offset.shape[0] - 1, vertices.shape[0]), dtype=torch.int) # (nProposal, N), int, cuda
proposals_pred[proposals_idx[:, 0].long(), proposals_idx[:, 1].long()] = 1
labels = labels[proposals_idx[:, 1][proposals_offset[:-1].long()].long()]
proposals_pointnum = proposals_pred.sum(1)
npoint_mask = (proposals_pointnum > self.propose_points)
proposals_pred = proposals_pred[npoint_mask]
labels = labels[npoint_mask]
return proposals_pred, labels
def cluster_(self, vertices, labels):
'''
:param batch_idxs: (N), int, cuda
:labels: 0-19
'''
batch_idxs = torch.zeros_like(labels)
mask_non_ignored = torch.ones_like(labels).bool()
for ignored_label in self.ignored_labels:
mask_non_ignored = mask_non_ignored & (self.class_mapping[labels] != ignored_label)
object_idxs = mask_non_ignored.nonzero().view(-1)
vertices_ = torch.from_numpy(vertices)[object_idxs].float().cuda()
labels_ = labels[object_idxs].int().cuda()
if vertices_.numel() == 0:
return torch.zeros((0,2)).int(), torch.zeros(1).int()
batch_idxs_ = batch_idxs[object_idxs].int().cuda()
batch_offsets_ = torch.FloatTensor([0, object_idxs.shape[0]]).int().cuda()
idx, start_len = ballquery_batch_p(vertices_, batch_idxs_, batch_offsets_, self.thresh, self.closed_points)
proposals_idx, proposals_offset = bfs_cluster(labels_.cpu(), idx.cpu(), start_len.cpu(), self.min_points)
proposals_idx[:, 1] = object_idxs[proposals_idx[:, 1].long()].int()
return proposals_idx, proposals_offset
def get_instances(self, vertices, scores):
proposals_pred, labels = self.cluster(vertices, scores)
instances = {}
for proposal_id in range(len(proposals_pred)):
clusters_i = proposals_pred[proposal_id]
score = scores[clusters_i.bool(), labels[proposal_id]]
score = self.score_func(score)
instances[proposal_id] = {}
instances[proposal_id]['conf'] = score.cpu().numpy()
instances[proposal_id]['label_id'] = self.class_mapping.cpu()[labels[proposal_id]]
instances[proposal_id]['pred_mask'] = clusters_i.cpu().numpy()
return instances
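# Illustrative sketch (not part of the original source): given per-point semantic
# scores from a network, the clusterer groups same-label points via ball query and
# BFS and returns one mask per proposal. The ignored labels, class mapping and
# input shapes below are assumptions.
#
#   clusterer = Clustering(ignored_labels=[0, 1], class_mapping=torch.arange(20))
#   instances = clusterer.get_instances(coords, scores)
#       # coords: (N, 3) float numpy array, scores: (N, 20) CUDA tensor
#   for inst in instances.values():
#       print(inst['label_id'], inst['conf'], inst['pred_mask'].sum())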
| ContrastiveSceneContexts-main | downstream/insseg/lib/bfs/bfs.py |
'''
PointGroup operations
Written by Li Jiang
'''
| ContrastiveSceneContexts-main | downstream/insseg/lib/bfs/ops/ops.py |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='PG_OP',
ext_modules=[
CUDAExtension('PG_OP', [
'src/bfs_cluster.cpp',
'src/bfs_cluster_kernel.cu',
])
],
cmdclass={'build_ext': BuildExtension}
)
| ContrastiveSceneContexts-main | downstream/insseg/lib/bfs/ops/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import torch
import hydra
import numpy as np
from lib.ddp_trainer import DetectionTrainer
from lib.distributed import multi_proc_run
def single_proc_run(config):
if not torch.cuda.is_available():
raise Exception('No GPUs FOUND.')
trainer = DetectionTrainer(config)
if config.net.is_train:
trainer.train()
else:
trainer.test()
@hydra.main(config_path='config', config_name='default.yaml')
def main(config):
# fix seed
np.random.seed(config.misc.seed)
torch.manual_seed(config.misc.seed)
torch.cuda.manual_seed(config.misc.seed)
port = random.randint(10001, 20001)
if config.misc.num_gpus > 1:
multi_proc_run(config.misc.num_gpus, port, fun=single_proc_run, fun_args=(config,))
else:
single_proc_run(config)
if __name__ == '__main__':
__spec__ = None
os.environ['MKL_THREADING_LAYER'] = 'GNU'
main()
| ContrastiveSceneContexts-main | downstream/votenet/ddp_main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
class SunrgbdDatasetConfig(object):
def __init__(self):
self.num_class = 10
self.num_heading_bin = 12
self.num_size_cluster = 10
self.type2class={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
self.class2type = {self.type2class[t]:t for t in self.type2class}
self.type2onehotclass={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
self.type_mean_size = {'bathtub': np.array([0.765840,1.398258,0.472728]),
'bed': np.array([2.114256,1.620300,0.927272]),
'bookshelf': np.array([0.404671,1.071108,1.688889]),
'chair': np.array([0.591958,0.552978,0.827272]),
'desk': np.array([0.695190,1.346299,0.736364]),
'dresser': np.array([0.528526,1.002642,1.172878]),
'night_stand': np.array([0.500618,0.632163,0.683424]),
'sofa': np.array([0.923508,1.867419,0.845495]),
'table': np.array([0.791118,1.279516,0.718182]),
'toilet': np.array([0.699104,0.454178,0.756250])}
self.mean_size_arr = np.zeros((self.num_size_cluster, 3))
for i in range(self.num_size_cluster):
self.mean_size_arr[i,:] = self.type_mean_size[self.class2type[i]]
def size2class(self, size, type_name):
''' Convert 3D box size (l,w,h) to size class and size residual '''
size_class = self.type2class[type_name]
size_residual = size - self.type_mean_size[type_name]
return size_class, size_residual
def class2size(self, pred_cls, residual):
''' Inverse function to size2class '''
mean_size = self.type_mean_size[self.class2type[pred_cls]]
return mean_size + residual
def angle2class(self, angle):
''' Convert continuous angle to discrete class
        [optional] also returns a small regression number from
        the class center angle to the current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
return is class of int32 of 0,1,...,N-1 and a number such that
class*(2pi/N) + number = angle
'''
num_class = self.num_heading_bin
angle = angle%(2*np.pi)
assert(angle>=0 and angle<=2*np.pi)
angle_per_class = 2*np.pi/float(num_class)
shifted_angle = (angle+angle_per_class/2)%(2*np.pi)
class_id = int(shifted_angle/angle_per_class)
residual_angle = shifted_angle - (class_id*angle_per_class+angle_per_class/2)
return class_id, residual_angle
def class2angle(self, pred_cls, residual, to_label_format=True):
''' Inverse function to angle2class '''
num_class = self.num_heading_bin
angle_per_class = 2*np.pi/float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle>np.pi:
angle = angle - 2*np.pi
return angle
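    # Worked example (not part of the original source): with num_heading_bin = 12
    # the bins are 30 degrees wide and centered at 0, 30, 60, ... degrees, so an
    # angle of 40 degrees falls into class 1 with a +10 degree residual, and
    # class2angle inverts the encoding exactly (cfg is an assumed
    # SunrgbdDatasetConfig() instance):
    #   cls, res = cfg.angle2class(np.deg2rad(40))   # cls == 1, res ~= np.deg2rad(10)
    #   cfg.class2angle(cls, res)                    # ~= np.deg2rad(40)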
def param2obb(self, center, heading_class, heading_residual, size_class, size_residual):
heading_angle = self.class2angle(heading_class, heading_residual)
box_size = self.class2size(int(size_class), size_residual)
obb = np.zeros((7,))
obb[0:3] = center
obb[3:6] = box_size
obb[6] = heading_angle*-1
return obb
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/model_util_sunrgbd.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Provides Python helper function to read My SUNRGBD dataset.
Author: Charles R. Qi
Date: October, 2017
Updated by Charles R. Qi
Date: December, 2018
Note: removed basis loading.
'''
import numpy as np
import cv2
import os
import scipy.io as sio # to load .mat files for depth points
type2class={'bed':0, 'table':1, 'sofa':2, 'chair':3, 'toilet':4, 'desk':5, 'dresser':6, 'night_stand':7, 'bookshelf':8, 'bathtub':9}
class2type = {type2class[t]:t for t in type2class}
def flip_axis_to_camera(pc):
''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
'''
pc2 = np.copy(pc)
pc2[:,[0,1,2]] = pc2[:,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
pc2[:,1] *= -1
return pc2
def flip_axis_to_depth(pc):
pc2 = np.copy(pc)
pc2[:,[0,1,2]] = pc2[:,[0,2,1]] # depth X,Y,Z = cam X,Z,-Y
pc2[:,2] *= -1
return pc2
class SUNObject3d(object):
def __init__(self, line):
data = line.split(' ')
data[1:] = [float(x) for x in data[1:]]
self.classname = data[0]
self.xmin = data[1]
self.ymin = data[2]
self.xmax = data[1]+data[3]
self.ymax = data[2]+data[4]
self.box2d = np.array([self.xmin,self.ymin,self.xmax,self.ymax])
self.centroid = np.array([data[5],data[6],data[7]])
self.unused_dimension = np.array([data[8],data[9],data[10]])
self.w = data[8]
self.l = data[9]
self.h = data[10]
self.orientation = np.zeros((3,))
self.orientation[0] = data[11]
self.orientation[1] = data[12]
self.heading_angle = -1 * np.arctan2(self.orientation[1], self.orientation[0])
class SUNRGBD_Calibration(object):
''' Calibration matrices and utils
We define five coordinate system in SUN RGBD dataset
camera coodinate:
Z is forward, Y is downward, X is rightward
depth coordinate:
Just change axis order and flip up-down axis from camera coord
upright depth coordinate: tilted depth coordinate by Rtilt such that Z is gravity direction,
Z is up-axis, Y is forward, X is right-ward
upright camera coordinate:
Just change axis order and flip up-down axis from upright depth coordinate
image coordinate:
----> x-axis (u)
|
v
y-axis (v)
depth points are stored in upright depth coordinate.
labels for 3d box (basis, centroid, size) are in upright depth coordinate.
2d boxes are in image coordinate
We generate frustum point cloud and 3d box in upright camera coordinate
'''
def __init__(self, calib_filepath):
lines = [line.rstrip() for line in open(calib_filepath)]
Rtilt = np.array([float(x) for x in lines[0].split(' ')])
self.Rtilt = np.reshape(Rtilt, (3,3), order='F')
K = np.array([float(x) for x in lines[1].split(' ')])
self.K = np.reshape(K, (3,3), order='F')
self.f_u = self.K[0,0]
self.f_v = self.K[1,1]
self.c_u = self.K[0,2]
self.c_v = self.K[1,2]
def project_upright_depth_to_camera(self, pc):
''' project point cloud from depth coord to camera coordinate
Input: (N,3) Output: (N,3)
'''
# Project upright depth to depth coordinate
pc2 = np.dot(np.transpose(self.Rtilt), np.transpose(pc[:,0:3])) # (3,n)
return flip_axis_to_camera(np.transpose(pc2))
def project_upright_depth_to_image(self, pc):
''' Input: (N,3) Output: (N,2) UV and (N,) depth '''
pc2 = self.project_upright_depth_to_camera(pc)
uv = np.dot(pc2, np.transpose(self.K)) # (n,3)
uv[:,0] /= uv[:,2]
uv[:,1] /= uv[:,2]
return uv[:,0:2], pc2[:,2]
def project_upright_depth_to_upright_camera(self, pc):
return flip_axis_to_camera(pc)
def project_upright_camera_to_upright_depth(self, pc):
return flip_axis_to_depth(pc)
def project_image_to_camera(self, uv_depth):
n = uv_depth.shape[0]
x = ((uv_depth[:,0]-self.c_u)*uv_depth[:,2])/self.f_u
y = ((uv_depth[:,1]-self.c_v)*uv_depth[:,2])/self.f_v
pts_3d_camera = np.zeros((n,3))
pts_3d_camera[:,0] = x
pts_3d_camera[:,1] = y
pts_3d_camera[:,2] = uv_depth[:,2]
return pts_3d_camera
def project_image_to_upright_camerea(self, uv_depth):
pts_3d_camera = self.project_image_to_camera(uv_depth)
pts_3d_depth = flip_axis_to_depth(pts_3d_camera)
pts_3d_upright_depth = np.transpose(np.dot(self.Rtilt, np.transpose(pts_3d_depth)))
return self.project_upright_depth_to_upright_camera(pts_3d_upright_depth)
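# Illustrative sketch (not part of the original source): a typical round trip with
# the calibration object is to project upright-depth points into the image plane
# and to back-project pixels-with-depth into the upright camera frame. The file
# path and the point array pc are assumptions.
#
#   calib = SUNRGBD_Calibration('calib/000001.txt')
#   uv, d = calib.project_upright_depth_to_image(pc[:, 0:3])    # pc: (N,>=3) array
#   uvd = np.concatenate([uv, d[:, None]], axis=1)               # (N,3) u, v, depth
#   pts = calib.project_image_to_upright_camerea(uvd)            # back to 3D points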
def rotx(t):
"""Rotation about the x-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[1, 0, 0],
[0, c, -s],
[0, s, c]])
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def transform_from_rot_trans(R, t):
"""Transforation matrix from rotation matrix and translation vector."""
R = R.reshape(3, 3)
t = t.reshape(3, 1)
return np.vstack((np.hstack([R, t]), [0, 0, 0, 1]))
def inverse_rigid_trans(Tr):
"""Inverse a rigid body transform matrix (3x4 as [R|t])
[R'|-R't; 0|1]
"""
inv_Tr = np.zeros_like(Tr) # 3x4
inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])
inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])
return inv_Tr
def read_sunrgbd_label(label_filename):
lines = [line.rstrip() for line in open(label_filename)]
objects = [SUNObject3d(line) for line in lines]
return objects
def load_image(img_filename):
return cv2.imread(img_filename)
def load_depth_points(depth_filename):
depth = np.loadtxt(depth_filename)
return depth
def load_depth_points_mat(depth_filename):
depth = sio.loadmat(depth_filename)['instance']
return depth
def random_shift_box2d(box2d, shift_ratio=0.1):
''' Randomly shift box center, randomly scale width and height
'''
r = shift_ratio
xmin,ymin,xmax,ymax = box2d
h = ymax-ymin
w = xmax-xmin
cx = (xmin+xmax)/2.0
cy = (ymin+ymax)/2.0
cx2 = cx + w*r*(np.random.random()*2-1)
cy2 = cy + h*r*(np.random.random()*2-1)
h2 = h*(1+np.random.random()*2*r-r) # 0.9 to 1.1
w2 = w*(1+np.random.random()*2*r-r) # 0.9 to 1.1
return np.array([cx2-w2/2.0, cy2-h2/2.0, cx2+w2/2.0, cy2+h2/2.0])
def in_hull(p, hull):
from scipy.spatial import Delaunay
if not isinstance(hull,Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p)>=0
def extract_pc_in_box3d(pc, box3d):
''' pc: (N,3), box3d: (8,3) '''
box3d_roi_inds = in_hull(pc[:,0:3], box3d)
return pc[box3d_roi_inds,:], box3d_roi_inds
def my_compute_box_3d(center, size, heading_angle):
R = rotz(-1*heading_angle)
l,w,h = size
x_corners = [-l,l,l,-l,-l,l,l,-l]
y_corners = [w,w,-w,-w,w,w,-w,-w]
z_corners = [h,h,h,h,-h,-h,-h,-h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0,:] += center[0]
corners_3d[1,:] += center[1]
corners_3d[2,:] += center[2]
return np.transpose(corners_3d)
def compute_box_3d(obj, calib):
''' Takes an object and a projection matrix (P) and projects the 3d
bounding box into the image plane.
Returns:
corners_2d: (8,2) array in image coord.
corners_3d: (8,3) array in in upright depth coord.
'''
center = obj.centroid
# compute rotational matrix around yaw axis
R = rotz(-1*obj.heading_angle)
#b,a,c = dimension
#print R, a,b,c
# 3d bounding box dimensions
l = obj.l # along heading arrow
w = obj.w # perpendicular to heading arrow
h = obj.h
# rotate and translate 3d bounding box
x_corners = [-l,l,l,-l,-l,l,l,-l]
y_corners = [w,w,-w,-w,w,w,-w,-w]
z_corners = [h,h,h,h,-h,-h,-h,-h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0,:] += center[0]
corners_3d[1,:] += center[1]
corners_3d[2,:] += center[2]
# project the 3d bounding box into the image plane
corners_2d,_ = calib.project_upright_depth_to_image(np.transpose(corners_3d))
#print 'corners_2d: ', corners_2d
return corners_2d, np.transpose(corners_3d)
def compute_orientation_3d(obj, calib):
''' Takes an object and a projection matrix (P) and projects the 3d
object orientation vector into the image plane.
Returns:
orientation_2d: (2,2) array in image coord.
orientation_3d: (2,3) array in depth coord.
'''
# orientation in object coordinate system
ori = obj.orientation
orientation_3d = np.array([[0, ori[0]],[0, ori[1]],[0,0]])
center = obj.centroid
orientation_3d[0,:] = orientation_3d[0,:] + center[0]
orientation_3d[1,:] = orientation_3d[1,:] + center[1]
orientation_3d[2,:] = orientation_3d[2,:] + center[2]
# project orientation into the image plane
orientation_2d,_ = calib.project_upright_depth_to_image(np.transpose(orientation_3d))
return orientation_2d, np.transpose(orientation_3d)
def draw_projected_box3d(image, qs, color=(255,255,255), thickness=2):
''' Draw 3d bounding box in image
qs: (8,2) array of vertices for the 3d box in following order:
1 -------- 0
/| /|
2 -------- 3 .
| | | |
. 5 -------- 4
|/ |/
6 -------- 7
'''
qs = qs.astype(np.int32)
for k in range(0,4):
#http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i,j=k,(k+1)%4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA) # use LINE_AA for opencv3
i,j=k+4,(k+1)%4 + 4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
i,j=k,k+4
cv2.line(image, (qs[i,0],qs[i,1]), (qs[j,0],qs[j,1]), color, thickness, cv2.CV_AA)
return image
import pickle
import gzip
def save_zipped_pickle(obj, filename, protocol=-1):
with gzip.open(filename, 'wb') as f:
pickle.dump(obj, f, protocol)
def load_zipped_pickle(filename):
with gzip.open(filename, 'rb') as f:
loaded_object = pickle.load(f)
return loaded_object
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/sunrgbd_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Helper class and functions for loading SUN RGB-D objects
Author: Charles R. Qi
Date: December, 2018
Note: removed unused code for frustum preparation.
Changed a way for data visualization (removed depdency on mayavi).
Load depth with scipy.io
'''
import os
import sys
import numpy as np
import sys
import cv2
import argparse
from PIL import Image
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils/'))
import pc_util
import sunrgbd_utils
DEFAULT_TYPE_WHITELIST = ['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']
class sunrgbd_object(object):
''' Load and parse object data '''
def __init__(self, root_dir, split='training', use_v1=False):
self.root_dir = root_dir
self.split = split
assert(self.split=='training')
self.split_dir = os.path.join(root_dir)
if split == 'training':
self.num_samples = 10335
elif split == 'testing':
self.num_samples = 2860
else:
print('Unknown split: %s' % (split))
exit(-1)
self.image_dir = os.path.join(self.split_dir, 'image')
self.calib_dir = os.path.join(self.split_dir, 'calib')
self.depth_dir = os.path.join(self.split_dir, 'depth')
if use_v1:
self.label_dir = os.path.join(self.split_dir, 'label_v1')
else:
self.label_dir = os.path.join(self.split_dir, 'label')
def __len__(self):
return self.num_samples
def get_image(self, idx):
img_filename = os.path.join(self.image_dir, '%06d.jpg'%(idx))
return sunrgbd_utils.load_image(img_filename)
def get_depth(self, idx):
depth_filename = os.path.join(self.depth_dir, '%06d.mat'%(idx))
return sunrgbd_utils.load_depth_points_mat(depth_filename)
def get_calibration(self, idx):
calib_filename = os.path.join(self.calib_dir, '%06d.txt'%(idx))
return sunrgbd_utils.SUNRGBD_Calibration(calib_filename)
def get_label_objects(self, idx):
label_filename = os.path.join(self.label_dir, '%06d.txt'%(idx))
return sunrgbd_utils.read_sunrgbd_label(label_filename)
def data_viz(data_dir, dump_dir=os.path.join(BASE_DIR, 'data_viz_dump')):
''' Examine and visualize SUN RGB-D data. '''
sunrgbd = sunrgbd_object(data_dir)
idxs = np.array(range(1,len(sunrgbd)+1))
np.random.seed(0)
np.random.shuffle(idxs)
for idx in range(len(sunrgbd)):
data_idx = idxs[idx]
print('-'*10, 'data index: ', data_idx)
pc = sunrgbd.get_depth(data_idx)
print('Point cloud shape:', pc.shape)
# Project points to image
calib = sunrgbd.get_calibration(data_idx)
uv,d = calib.project_upright_depth_to_image(pc[:,0:3])
print('Point UV:', uv)
print('Point depth:', d)
import matplotlib.pyplot as plt
cmap = plt.cm.get_cmap('hsv', 256)
cmap = np.array([cmap(i) for i in range(256)])[:,:3]*255
img = sunrgbd.get_image(data_idx)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i in range(uv.shape[0]):
depth = d[i]
color = cmap[int(120.0/depth),:]
cv2.circle(img, (int(np.round(uv[i,0])), int(np.round(uv[i,1]))), 2,
color=tuple(color), thickness=-1)
if not os.path.exists(dump_dir):
os.mkdir(dump_dir)
Image.fromarray(img).save(os.path.join(dump_dir,'img_depth.jpg'))
# Load box labels
objects = sunrgbd.get_label_objects(data_idx)
print('Objects:', objects)
# Draw 2D boxes on image
img = sunrgbd.get_image(data_idx)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i,obj in enumerate(objects):
cv2.rectangle(img, (int(obj.xmin),int(obj.ymin)),
(int(obj.xmax),int(obj.ymax)), (0,255,0), 2)
cv2.putText(img, '%d %s'%(i,obj.classname), (max(int(obj.xmin),15),
max(int(obj.ymin),15)), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
(255,0,0), 2)
Image.fromarray(img).save(os.path.join(dump_dir, 'img_box2d.jpg'))
# Dump OBJ files for the colored point cloud
for num_point in [10000,20000,40000,80000]:
sampled_pcrgb = pc_util.random_sampling(pc, num_point)
pc_util.write_ply_rgb(sampled_pcrgb[:,0:3],
                (sampled_pcrgb[:,3:]*256).astype(np.uint8),
os.path.join(dump_dir, 'pcrgb_%dk.obj'%(num_point//1000)))
# Dump OBJ files for 3D bounding boxes
# l,w,h correspond to dx,dy,dz
# heading angle is from +X rotating towards -Y
        # (+X is 0 degrees, -Y is 90 degrees)
oriented_boxes = []
for obj in objects:
obb = np.zeros((7))
obb[0:3] = obj.centroid
# Some conversion to map with default setting of w,l,h
# and angle in box dumping
obb[3:6] = np.array([obj.l,obj.w,obj.h])*2
obb[6] = -1 * obj.heading_angle
print('Object cls, heading, l, w, h:',\
obj.classname, obj.heading_angle, obj.l, obj.w, obj.h)
oriented_boxes.append(obb)
if len(oriented_boxes)>0:
oriented_boxes = np.vstack(tuple(oriented_boxes))
pc_util.write_oriented_bbox(oriented_boxes,
os.path.join(dump_dir, 'obbs.ply'))
else:
print('-'*30)
continue
# Draw 3D boxes on depth points
box3d = []
ori3d = []
for obj in objects:
corners_3d_image, corners_3d = sunrgbd_utils.compute_box_3d(obj, calib)
ori_3d_image, ori_3d = sunrgbd_utils.compute_orientation_3d(obj, calib)
print('Corners 3D: ', corners_3d)
box3d.append(corners_3d)
ori3d.append(ori_3d)
pc_box3d = np.concatenate(box3d, 0)
pc_ori3d = np.concatenate(ori3d, 0)
print(pc_box3d.shape)
print(pc_ori3d.shape)
pc_util.write_ply(pc_box3d, os.path.join(dump_dir, 'box3d_corners.ply'))
pc_util.write_ply(pc_ori3d, os.path.join(dump_dir, 'box3d_ori.ply'))
print('-'*30)
print('Point clouds and bounding boxes saved to PLY files under %s'%(dump_dir))
print('Type anything to continue to the next sample...')
input()
def extract_sunrgbd_data(idx_filename, split, output_folder, num_point=20000,
type_whitelist=DEFAULT_TYPE_WHITELIST,
save_votes=False, use_v1=False, skip_empty_scene=True):
""" Extract scene point clouds and
bounding boxes (centroids, box sizes, heading angles, semantic classes).
Dumped point clouds and boxes are in upright depth coord.
Args:
idx_filename: a TXT file where each line is an int number (index)
split: training or testing
save_votes: whether to compute and save Ground truth votes.
use_v1: use the SUN RGB-D V1 data
skip_empty_scene: if True, skip scenes that contain no object (no objet in whitelist)
Dumps:
<id>_pc.npz of (N,6) where N is for number of subsampled points and 6 is
for XYZ and RGB (in 0~1) in upright depth coord
<id>_bbox.npy of (K,8) where K is the number of objects, 8 is for
            centroids (cx,cy,cz), dimension (l,w,h), heading_angle and semantic_class
<id>_votes.npz of (N,10) with 0/1 indicating whether the point belongs to an object,
then three sets of GT votes for up to three objects. If the point is only in one
object's OBB, then the three GT votes are the same.
"""
dataset = sunrgbd_object('./sunrgbd_trainval', split, use_v1=use_v1)
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
for data_idx in data_idx_list:
print('------------- ', data_idx)
objects = dataset.get_label_objects(data_idx)
# Skip scenes with 0 object
if skip_empty_scene and (len(objects)==0 or \
len([obj for obj in objects if obj.classname in type_whitelist])==0):
continue
object_list = []
for obj in objects:
if obj.classname not in type_whitelist: continue
obb = np.zeros((8))
obb[0:3] = obj.centroid
            # Note that, compared with data_viz, we do not multiply l,w,h by 2
            # nor do we flip the heading angle
obb[3:6] = np.array([obj.l,obj.w,obj.h])
obb[6] = obj.heading_angle
obb[7] = sunrgbd_utils.type2class[obj.classname]
object_list.append(obb)
if len(object_list)==0:
obbs = np.zeros((0,8))
else:
obbs = np.vstack(object_list) # (K,8)
pc_upright_depth = dataset.get_depth(data_idx)
pc_upright_depth_subsampled = pc_util.random_sampling(pc_upright_depth, num_point)
np.savez_compressed(os.path.join(output_folder,'%06d_pc.npz'%(data_idx)),
pc=pc_upright_depth_subsampled)
np.save(os.path.join(output_folder, '%06d_bbox.npy'%(data_idx)), obbs)
if save_votes:
N = pc_upright_depth_subsampled.shape[0]
point_votes = np.zeros((N,10)) # 3 votes and 1 vote mask
point_vote_idx = np.zeros((N)).astype(np.int32) # in the range of [0,2]
indices = np.arange(N)
for obj in objects:
if obj.classname not in type_whitelist: continue
try:
# Find all points in this object's OBB
box3d_pts_3d = sunrgbd_utils.my_compute_box_3d(obj.centroid,
np.array([obj.l,obj.w,obj.h]), obj.heading_angle)
pc_in_box3d,inds = sunrgbd_utils.extract_pc_in_box3d(\
pc_upright_depth_subsampled, box3d_pts_3d)
# Assign first dimension to indicate it is in an object box
point_votes[inds,0] = 1
# Add the votes (all 0 if the point is not in any object's OBB)
votes = np.expand_dims(obj.centroid,0) - pc_in_box3d[:,0:3]
sparse_inds = indices[inds] # turn dense True,False inds to sparse number-wise inds
for i in range(len(sparse_inds)):
j = sparse_inds[i]
point_votes[j, int(point_vote_idx[j]*3+1):int((point_vote_idx[j]+1)*3+1)] = votes[i,:]
                    # If this is the point's first vote, also copy it into the 2nd and 3rd vote slots
if point_vote_idx[j] == 0:
point_votes[j,4:7] = votes[i,:]
point_votes[j,7:10] = votes[i,:]
point_vote_idx[inds] = np.minimum(2, point_vote_idx[inds]+1)
except:
print('ERROR ----', data_idx, obj.classname)
np.savez_compressed(os.path.join(output_folder, '%06d_votes.npz'%(data_idx)),
point_votes = point_votes)
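# --- Illustrative only (not part of the original pipeline) ---------------------
# A minimal sketch of reading back one scan dumped by extract_sunrgbd_data above.
# The folder/index arguments are assumptions; pass the same output_folder and
# data_idx that were used when dumping.
def _example_load_dumped_scan(output_folder, data_idx):
    prefix = os.path.join(output_folder, '%06d' % data_idx)
    pc = np.load(prefix + '_pc.npz')['pc']                 # (N,6): XYZ + RGB in 0~1
    bboxes = np.load(prefix + '_bbox.npy')                 # (K,8): cx,cy,cz, half-lengths l,w,h, heading, class
    votes = np.load(prefix + '_votes.npz')['point_votes']  # (N,10): vote mask + 3 GT votes
    vote_mask = votes[:, 0]                                # 1 if the point lies inside some object's OBB
    first_vote = votes[:, 1:4]                             # offset from the point to its box centroid
    return pc, bboxes, vote_mask, first_vote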
def get_box3d_dim_statistics(idx_filename,
type_whitelist=DEFAULT_TYPE_WHITELIST,
save_path=None):
""" Collect 3D bounding box statistics.
Used for computing mean box sizes. """
dataset = sunrgbd_object('./sunrgbd_trainval')
dimension_list = []
type_list = []
ry_list = []
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
for data_idx in data_idx_list:
print('------------- ', data_idx)
calib = dataset.get_calibration(data_idx) # 3 by 4 matrix
objects = dataset.get_label_objects(data_idx)
for obj_idx in range(len(objects)):
obj = objects[obj_idx]
if obj.classname not in type_whitelist: continue
heading_angle = -1 * np.arctan2(obj.orientation[1], obj.orientation[0])
dimension_list.append(np.array([obj.l,obj.w,obj.h]))
type_list.append(obj.classname)
ry_list.append(heading_angle)
    import pickle
if save_path is not None:
with open(save_path,'wb') as fp:
pickle.dump(type_list, fp)
pickle.dump(dimension_list, fp)
pickle.dump(ry_list, fp)
    # Get the median box size for each category
box3d_pts = np.vstack(dimension_list)
for class_type in sorted(set(type_list)):
cnt = 0
box3d_list = []
for i in range(len(dimension_list)):
if type_list[i]==class_type:
cnt += 1
box3d_list.append(dimension_list[i])
median_box3d = np.median(box3d_list,0)
print("\'%s\': np.array([%f,%f,%f])," % \
(class_type, median_box3d[0]*2, median_box3d[1]*2, median_box3d[2]*2))
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--viz', action='store_true', help='Run data visualization.')
parser.add_argument('--compute_median_size', action='store_true', help='Compute median 3D bounding box sizes for each class.')
parser.add_argument('--gen_v1_data', action='store_true', help='Generate V1 dataset.')
parser.add_argument('--gen_v2_data', action='store_true', help='Generate V2 dataset.')
args = parser.parse_args()
if args.viz:
data_viz(os.path.join(BASE_DIR, 'sunrgbd_trainval'))
exit()
if args.compute_median_size:
get_box3d_dim_statistics(os.path.join(BASE_DIR, 'sunrgbd_trainval/train_data_idx.txt'))
exit()
if args.gen_v1_data:
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/train_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v1_train'),
save_votes=True, num_point=50000, use_v1=True, skip_empty_scene=False)
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/val_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v1_val'),
save_votes=True, num_point=50000, use_v1=True, skip_empty_scene=False)
if args.gen_v2_data:
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/train_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v2_train'),
save_votes=True, num_point=50000, use_v1=False, skip_empty_scene=False)
extract_sunrgbd_data(os.path.join(BASE_DIR, 'sunrgbd_trainval/val_data_idx.txt'),
split = 'training',
output_folder = os.path.join(BASE_DIR, 'sunrgbd_pc_bbox_votes_50k_v2_val'),
save_votes=True, num_point=50000, use_v1=False, skip_empty_scene=False)
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/sunrgbd_data.py |
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is right ward), heading angle (from +X rotating to -Y) and semantic class
Point clouds are in **upright_depth coordinate (X right, Y forward, Z upward)**
Return heading class, heading residual, size class and size residual for 3D bounding boxes.
Oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate
(l,w,h) are the *half lengths* of the object sizes
The heading angle is a rotation in radians from +X rotating towards -Y. (+X is 0, -Y is pi/2)
Author: Charles R. Qi
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import pc_util
import sunrgbd_utils
from model_util_sunrgbd import SunrgbdDatasetConfig
DC = SunrgbdDatasetConfig() # dataset specific config
MAX_NUM_OBJ = 64 # maximum number of objects allowed per scene
MEAN_COLOR_RGB = np.array([0.5,0.5,0.5]) # sunrgbd color is in 0~1
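# --- Illustrative only ----------------------------------------------------------
# A small hedged helper making the heading convention in the module docstring
# concrete: heading 0 points along +X and heading pi/2 points along -Y, since
# the angle is measured from +X rotating towards -Y.
def _heading_direction(heading_angle):
    return np.array([np.cos(heading_angle), -np.sin(heading_angle), 0.0])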
class SunrgbdDetectionVotesDataset(Dataset):
def __init__(self, split_set='train', num_points=20000,
use_color=False, use_height=False, use_v1=False,
augment=False, scan_idx_list=None):
assert(num_points<=50000)
self.use_v1 = use_v1
if use_v1:
self.data_path = os.path.join(ROOT_DIR,
'sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_%s'%(split_set))
else:
self.data_path = os.path.join(ROOT_DIR,
'sunrgbd/sunrgbd_pc_bbox_votes_50k_v2_%s'%(split_set))
self.raw_data_path = os.path.join(ROOT_DIR, 'sunrgbd/sunrgbd_trainval')
self.scan_names = sorted(list(set([os.path.basename(x)[0:6] \
for x in os.listdir(self.data_path)])))
if scan_idx_list is not None:
self.scan_names = [self.scan_names[i] for i in scan_idx_list]
self.scan_names = self.scan_names[:int(len(self.scan_names))]
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
heading_residual_label: (MAX_NUM_OBJ,)
            size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
size_residual_label: (MAX_NUM_OBJ,3)
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
vote_label: (N,9) with votes XYZ (3 votes: X1Y1Z1, X2Y2Z2, X3Y3Z3)
                if there is only one vote then X1==X2==X3 etc.
vote_label_mask: (N,) with 0/1 with 1 indicating the point
is in one of the object's OBB.
scan_idx: int scan index in scan_names list
max_gt_bboxes: unused
"""
scan_name = self.scan_names[idx]
point_cloud = np.load(os.path.join(self.data_path, scan_name)+'_pc.npz')['pc'] # Nx6
bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy') # K,8
point_votes = np.load(os.path.join(self.data_path, scan_name)+'_votes.npz')['point_votes'] # Nx10
point_cloud = point_cloud[:,0:6]
point_cloud[:,3:] = (point_cloud[:,3:]-MEAN_COLOR_RGB)
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
bboxes[:,0] = -1 * bboxes[:,0]
bboxes[:,6] = np.pi - bboxes[:,6]
point_votes[:,[1,4,7]] = -1 * point_votes[:,[1,4,7]]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
rot_mat = sunrgbd_utils.rotz(rot_angle)
point_votes_end = np.zeros_like(point_votes)
point_votes_end[:,1:4] = np.dot(point_cloud[:,0:3] + point_votes[:,1:4], np.transpose(rot_mat))
point_votes_end[:,4:7] = np.dot(point_cloud[:,0:3] + point_votes[:,4:7], np.transpose(rot_mat))
point_votes_end[:,7:10] = np.dot(point_cloud[:,0:3] + point_votes[:,7:10], np.transpose(rot_mat))
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
bboxes[:,0:3] = np.dot(bboxes[:,0:3], np.transpose(rot_mat))
bboxes[:,6] -= rot_angle
point_votes[:,1:4] = point_votes_end[:,1:4] - point_cloud[:,0:3]
point_votes[:,4:7] = point_votes_end[:,4:7] - point_cloud[:,0:3]
point_votes[:,7:10] = point_votes_end[:,7:10] - point_cloud[:,0:3]
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:,3:6] + MEAN_COLOR_RGB
rgb_color *= (1+0.4*np.random.random(3)-0.2) # brightness change for each channel
rgb_color += (0.1*np.random.random(3)-0.05) # color shift for each channel
rgb_color += np.expand_dims((0.05*np.random.random(point_cloud.shape[0])-0.025), -1) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(np.random.random(point_cloud.shape[0])>0.3,-1)
point_cloud[:,3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random()*0.3+0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio,3),0)
point_cloud[:,0:3] *= scale_ratio
bboxes[:,0:3] *= scale_ratio
bboxes[:,3:6] *= scale_ratio
point_votes[:,1:4] *= scale_ratio
point_votes[:,4:7] *= scale_ratio
point_votes[:,7:10] *= scale_ratio
if self.use_height:
point_cloud[:,-1] *= scale_ratio[0,0]
# ------------------------------- LABELS ------------------------------
box3d_centers = np.zeros((MAX_NUM_OBJ, 3))
box3d_sizes = np.zeros((MAX_NUM_OBJ, 3))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
label_mask = np.zeros((MAX_NUM_OBJ))
label_mask[0:bboxes.shape[0]] = 1
max_bboxes = np.zeros((MAX_NUM_OBJ, 8))
max_bboxes[0:bboxes.shape[0],:] = bboxes
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
box3d_center = bbox[0:3]
angle_class, angle_residual = DC.angle2class(bbox[6])
            # NOTE: The mean sizes stored in size2class are full box-edge lengths,
            # while sunrgbd_data.py dumped *half* lengths l,w,h, so we have to multiply by 2 here
box3d_size = bbox[3:6]*2
size_class, size_residual = DC.size2class(box3d_size, DC.class2type[semantic_class])
box3d_centers[i,:] = box3d_center
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
size_classes[i] = size_class
size_residuals[i] = size_residual
box3d_sizes[i,:] = box3d_size
target_bboxes_mask = label_mask
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
corners_3d = sunrgbd_utils.my_compute_box_3d(bbox[0:3], bbox[3:6], bbox[6])
# compute axis aligned box
xmin = np.min(corners_3d[:,0])
ymin = np.min(corners_3d[:,1])
zmin = np.min(corners_3d[:,2])
xmax = np.max(corners_3d[:,0])
ymax = np.max(corners_3d[:,1])
zmax = np.max(corners_3d[:,2])
target_bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2, xmax-xmin, ymax-ymin, zmax-zmin])
target_bboxes[i,:] = target_bbox
point_cloud, choices = pc_util.random_sampling(point_cloud, self.num_points, return_choices=True)
point_votes_mask = point_votes[choices,0]
point_votes = point_votes[choices,1:]
ret_dict = {}
ret_dict['point_clouds'] = point_cloud.astype(np.float32)[:,:3]
ret_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3]
ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
ret_dict['size_class_label'] = size_classes.astype(np.int64)
ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0:bboxes.shape[0]] = bboxes[:,-1] # from 0 to 9
ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
ret_dict['vote_label'] = point_votes.astype(np.float32)
ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
ret_dict['scan_idx'] = np.array(idx).astype(np.int64)
ret_dict['max_gt_bboxes'] = max_bboxes
ret_dict['pcl_color'] = point_cloud.astype(np.float32)[:,3:6]
return ret_dict
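# --- Illustrative only ----------------------------------------------------------
# Minimal usage sketch for the dataset above; it assumes the v1 votes dump exists
# under sunrgbd/sunrgbd_pc_bbox_votes_50k_v1_train. Batch keys follow the dict
# documented in __getitem__.
def _example_dataloader(batch_size=8, num_workers=4):
    from torch.utils.data import DataLoader
    dataset = SunrgbdDetectionVotesDataset(split_set='train', num_points=20000,
                                           use_color=False, use_height=True, use_v1=True)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                        num_workers=num_workers)
    batch = next(iter(loader))  # e.g. batch['point_clouds'] has shape (batch_size, 20000, 3)
    return batch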
def viz_votes(pc, point_votes, point_votes_mask):
""" Visualize point votes and point votes mask labels
pc: (N,3 or 6), point_votes: (N,9), point_votes_mask: (N,)
"""
inds = (point_votes_mask==1)
pc_obj = pc[inds,0:3]
pc_obj_voted1 = pc_obj + point_votes[inds,0:3]
pc_obj_voted2 = pc_obj + point_votes[inds,3:6]
pc_obj_voted3 = pc_obj + point_votes[inds,6:9]
pc_util.write_ply(pc_obj, 'pc_obj.ply')
pc_util.write_ply(pc_obj_voted1, 'pc_obj_voted1.ply')
pc_util.write_ply(pc_obj_voted2, 'pc_obj_voted2.ply')
pc_util.write_ply(pc_obj_voted3, 'pc_obj_voted3.ply')
def viz_obb(pc, label, mask, angle_classes, angle_residuals,
size_classes, size_residuals):
""" Visualize oriented bounding box ground truth
pc: (N,3)
label: (K,3) K == MAX_NUM_OBJ
mask: (K,)
angle_classes: (K,)
angle_residuals: (K,)
size_classes: (K,)
size_residuals: (K,3)
"""
oriented_boxes = []
K = label.shape[0]
for i in range(K):
if mask[i] == 0: continue
obb = np.zeros(7)
obb[0:3] = label[i,0:3]
heading_angle = DC.class2angle(angle_classes[i], angle_residuals[i])
box_size = DC.class2size(size_classes[i], size_residuals[i])
obb[3:6] = box_size
obb[6] = -1 * heading_angle
print(obb)
oriented_boxes.append(obb)
pc_util.write_oriented_bbox(oriented_boxes, 'gt_obbs.ply')
pc_util.write_ply(label[mask==1,:], 'gt_centroids.ply')
def get_sem_cls_statistics():
""" Compute number of objects for each semantic class """
d = SunrgbdDetectionVotesDataset(use_height=True, use_color=True, use_v1=True, augment=True)
sem_cls_cnt = {}
for i in range(len(d)):
if i%10==0: print(i)
sample = d[i]
pc = sample['point_clouds']
sem_cls = sample['sem_cls_label']
mask = sample['box_label_mask']
        for j in range(len(sem_cls)):
if mask[j] == 0: continue
if sem_cls[j] not in sem_cls_cnt:
sem_cls_cnt[sem_cls[j]] = 0
sem_cls_cnt[sem_cls[j]] += 1
print(sem_cls_cnt)
if __name__=='__main__':
d = SunrgbdDetectionVotesDataset(use_height=True, use_color=True, use_v1=True, augment=True)
sample = d[200]
print(sample['vote_label'].shape, sample['vote_label_mask'].shape)
pc_util.write_ply(sample['point_clouds'], 'pc.ply')
viz_votes(sample['point_clouds'], sample['vote_label'], sample['vote_label_mask'])
viz_obb(sample['point_clouds'], sample['center_label'], sample['box_label_mask'],
sample['heading_class_label'], sample['heading_residual_label'],
sample['size_class_label'], sample['size_residual_label'])
| ContrastiveSceneContexts-main | downstream/votenet/datasets/sunrgbd/sunrgbd_detection_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Utility functions for metric evaluation.
Author: Or Litany and Charles R. Qi
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import numpy as np
def calc_iou(box_a, box_b):
"""Computes IoU of two axis aligned bboxes.
Args:
        box_a, box_b: 6D arrays in corner format (xmin,ymin,zmin,xmax,ymax,zmax)
Returns:
iou
"""
max_a = box_a[3:6]
max_b = box_b[3:6]
min_max = np.array([max_a, max_b]).min(0)
min_a = box_a[0:3]
min_b = box_b[0:3]
max_min = np.array([min_a, min_b]).max(0)
if not ((min_max > max_min).all()):
return 0.0
intersection = (min_max - max_min).prod()
vol_a = (box_a[3:6] - box_a[0:3]).prod()
vol_b = (box_b[3:6] - box_b[0:3]).prod()
union = vol_a + vol_b - intersection
return 1.0*intersection / union
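# --- Illustrative only ----------------------------------------------------------
# Quick sanity check for calc_iou with boxes in corner format
# (xmin,ymin,zmin,xmax,ymax,zmax): identical unit boxes give IoU 1,
# disjoint boxes give IoU 0.
if __name__ == '__main__':
    unit_box = np.array([0., 0., 0., 1., 1., 1.])
    assert abs(calc_iou(unit_box, unit_box) - 1.0) < 1e-6
    assert calc_iou(unit_box, unit_box + 10.0) == 0.0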
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/metric_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import os, sys, argparse
import inspect
from copy import deepcopy
from evaluate_object_detection_helper import eval_det
import numpy as np
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import util
import util_3d
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='', help='output file [default: pred_path/object_detection_evaluation.txt]')
opt = parser.parse_args()
if opt.output_file == '':
opt.output_file = os.path.join(opt.pred_path, 'object_detection_evaluation.txt')
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
ID_TO_LABEL = {}
LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
opt.overlaps = np.array([0.5,0.25])
# minimum region size for evaluation [verts]
opt.min_region_sizes = np.array( [ 100 ] )
# distance thresholds [m]
opt.distance_threshes = np.array( [ float('inf') ] )
# distance confidences
opt.distance_confs = np.array( [ -float('inf') ] )
def compute_averages(aps):
d_inf = 0
o50 = np.where(np.isclose(opt.overlaps,0.5))
o25 = np.where(np.isclose(opt.overlaps,0.25))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
def print_results(avgs):
sep = ""
col1 = ":"
lineLen = 64
print("")
print("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
print(line)
print("#"*lineLen)
for (li,label_name) in enumerate(CLASS_LABELS):
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
print(line)
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
print("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
print(line)
print("")
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap50, ap25]]) + '\n')
def evaluate(pred_files, gt_files, pred_path, output_file):
print('evaluating', len(pred_files), 'scans...')
overlaps = opt.overlaps
    ap_scores = np.zeros((1, len(CLASS_LABELS), len(overlaps)), float)
pred_all = {}
gt_all = {}
for i in range(len(pred_files)):
matches_key = os.path.abspath(gt_files[i])
image_id = os.path.basename(matches_key)
# assign gt to predictions
pred_all[image_id] = []
gt_all[image_id] = []
#read prediction file
lines = open(pred_files[i]).read().splitlines()
for line in lines:
parts = line.split(' ')
if len(parts) != 8:
util.print_error('invalid object detection prediction file. Expected (per line): [minx] [miny] [minz] [maxx] [maxy] [maxz] [label_id] [score]', user_fault=True)
bbox = np.array([float(parts[0]), float(parts[1]), float(parts[2]), float(parts[3]), float(parts[4]), float(parts[5])])
            class_id = int(float(parts[6]))
if not class_id in VALID_CLASS_IDS:
continue
classname = ID_TO_LABEL[class_id]
score = float(parts[7])
pred_all[image_id].append((classname, bbox, score))
#read ground truth file
lines = open(gt_files[i]).read().splitlines()
for line in lines:
parts = line.split(' ')
if len(parts) != 7:
util.print_error('invalid object detection ground truth file. Expected (per line): [minx] [miny] [minz] [maxx] [maxy] [maxz] [label_id]', user_fault=True)
bbox = np.array([float(parts[0]), float(parts[1]), float(parts[2]), float(parts[3]), float(parts[4]), float(parts[5])])
            class_id = int(float(parts[6]))
if not class_id in VALID_CLASS_IDS:
continue
classname = ID_TO_LABEL[class_id]
gt_all[image_id].append((classname, bbox))
for oi, overlap_th in enumerate(overlaps):
_,_,ap_dict = eval_det(pred_all, gt_all, ovthresh=overlap_th)
for label in ap_dict:
id = CLASS_LABELS.index(label)
ap_scores[0,id, oi] = ap_dict[label]
#print(ap_scores)
avgs = compute_averages(ap_scores)
# print
print_results(avgs)
write_result_file(avgs, output_file)
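# --- Illustrative only ----------------------------------------------------------
# Hedged sketch of writing a single prediction line in the format parsed above:
# "minx miny minz maxx maxy maxz label_id score" per line, where label_id must
# be one of VALID_CLASS_IDS. The path argument is an assumption.
def _example_write_prediction_file(path):
    with open(path, 'w') as f:
        f.write('0.00 0.00 0.00 1.00 1.00 1.00 5 0.95\n')  # nyu40 id 5 = 'chair'
    return path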
def main():
pred_files = [f for f in os.listdir(opt.pred_path) if f.endswith('.txt') and f != 'object_detection_evaluation.txt']
gt_files = []
if len(pred_files) == 0:
util.print_error('No result files found.', user_fault=True)
for i in range(len(pred_files)):
gt_file = os.path.join(opt.gt_path, pred_files[i])
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_files[i]), user_fault=True)
gt_files.append(gt_file)
pred_files[i] = os.path.join(opt.pred_path, pred_files[i])
# evaluate
evaluate(pred_files, gt_files, opt.pred_path, opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/evaluate_object_detection.py |
import os, sys
import csv
import numpy as np
import imageio
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
    if represents_int(list(mapping.keys())[0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
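# --- Illustrative only ----------------------------------------------------------
# Usage sketch for read_label_mapping; the tsv path is an assumption (the file
# ships with the ScanNet release as scannetv2-labels.combined.tsv).
def _example_label_mapping(tsv_path='scannetv2-labels.combined.tsv'):
    label_map = read_label_mapping(tsv_path, label_from='raw_category', label_to='nyu40id')
    return label_map.get('chair')  # nyu40 id of the raw category 'chair'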
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144)
]
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/util.py |
import os, sys
import json
import numpy as np
from plyfile import PlyData, PlyElement
import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'pred_mask'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
output_mask_file = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
f.write('%s %d %f\n' % (output_mask_file, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
export_ids(output_mask_file, mask)
def export_detection_ids_for_eval(filename, mesh_vertices, label_ids, instance_ids):
    '''Export boxes in the per-line format (minx miny minz maxx maxy maxz label_id) used by the object detection evaluation
'''
assert label_ids.shape[0] == instance_ids.shape[0]
insts = np.unique(instance_ids)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
inst_coord = mesh_vertices[loc[0]]
max_coord = np.amax(inst_coord, axis = 0)
min_coord = np.amin(inst_coord, axis = 0)
maxx, maxy, maxz = max_coord[0], max_coord[1], max_coord[2]
minx, miny, minz = min_coord[0], min_coord[1], min_coord[2]
label_id = label_ids[loc[0][0]]
f.write('%.2f %.2f %.2f %.2f %.2f %.2f %d\n' % (minx, miny, minz, maxx, maxy, maxz, label_id))
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
dict = {}
dict["instance_id"] = self.instance_id
dict["label_id"] = self.label_id
dict["vert_count"] = self.vert_count
dict["med_dist"] = self.med_dist
dict["dist_conf"] = self.dist_conf
return dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]', user_fault=True)
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path', user_fault=True)
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename), user_fault=True)
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/util_3d.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Generic Code for Object Detection Evaluation
Input:
For each class:
For each image:
Predictions: box, score
Groundtruths: box
Output:
For each class:
precision-recal and average precision
Author: Charles R. Qi
Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py
"""
import numpy as np
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
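# --- Illustrative only ----------------------------------------------------------
# Tiny hedged example of voc_ap: a detector that recovers all ground truth with
# perfect precision has AP 1.0 under the area-under-PR-curve computation above.
def _example_voc_ap():
    rec = np.array([0.5, 1.0])
    prec = np.array([1.0, 1.0])
    return voc_ap(rec, prec, use_07_metric=False)  # -> 1.0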
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
from metric_util import calc_iou # axis-aligned 3D box IoU
def get_iou(bb1, bb2):
""" Compute IoU of two bounding boxes.
    ** Define your box IoU function HERE **
"""
#pass
iou3d = calc_iou(bb1, bb2)
return iou3d
#from lib.utils.box_util import box3d_iou
#def get_iou_obb(bb1,bb2):
# iou3d, iou2d = box3d_iou(bb1,bb2)
# return iou3d
def get_iou_main(get_iou_func, args):
return get_iou_func(*args)
def eval_det_cls(pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for a single class.
Input:
pred: map of {img_id: [(bbox, score)]} where bbox is numpy array of size 6
gt: map of {img_id: [bbox]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if True use VOC07 11 point method
Output:
rec: numpy array of length nd
prec: numpy array of length nd
ap: scalar, average precision
"""
# construct gt objects
class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}
npos = 0
for img_id in gt.keys():
        bbox = np.array(gt[img_id]) # (n,6): n axis-aligned GT boxes in this img_id, in corner format (xmin,...,zmax)
det = [False] * len(bbox) # length = n
npos += len(bbox) # sum of GT bounding boxes in all scenes
class_recs[img_id] = {'bbox': bbox, 'det': det}
# pad empty list to all other imgids
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {'bbox': np.array([]), 'det': []}
# construct dets
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for box,score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB) # (nd,4 or 8,3 or 6)
# sort by confidence
sorted_ind = np.argsort(-confidence) # sort in descending order. Meaning: largest confidence first
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids) #nd: number of bounding boxes in all scenes
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
#if d%100==0: print(d)
        R = class_recs[image_ids[d]] # GT boxes (and their matched flags) for the image of the d-th most confident detection
bb = BB[d,...].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j,...]))
if iou > ovmax:
ovmax = iou # ovmax is the largest iou between BB and all ground truth boxes in BBGT
jmax = j
#print d, ovmax
if ovmax > ovthresh:
if not R['det'][jmax]:
                tp[d] = 1.  # best-overlapping GT box is still unmatched -> true positive
R['det'][jmax] = 1
else:
                fp[d] = 1.  # that GT box was already matched to a higher-confidence detection -> false positive
else:
            fp[d] = 1.  # no GT box overlaps this detection above the threshold -> false positive
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
#print('NPOS: ', npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def eval_det_cls_wrapper(arguments):
pred, gt, ovthresh, use_07_metric, get_iou_func = arguments
rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, get_iou_func)
return (rec, prec, ap)
def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]} where
_img_id: anything, can be integer or string
_classname: can be string or integer
_bbox: numpy array of size 6
_score: float
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
for classname in gt.keys():
rec[classname], prec[classname], ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func)
return rec, prec, ap
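# --- Illustrative only ----------------------------------------------------------
# Minimal sketch of eval_det on a single scan with one perfectly localized
# 'chair' prediction; boxes use the corner format expected by get_iou.
def _example_eval_det():
    box = np.array([0., 0., 0., 1., 1., 1.])
    pred_all = {'scan0': [('chair', box, 0.9)]}
    gt_all = {'scan0': [('chair', box)]}
    rec, prec, ap = eval_det(pred_all, gt_all, ovthresh=0.25)
    return ap['chair']  # -> 1.0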
from multiprocessing import Pool
def eval_det_multiprocessing(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
p = Pool(processes=10)
ret_values = p.map(eval_det_cls_wrapper, [(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func) for classname in gt.keys() if classname in pred])
p.close()
for i, classname in enumerate(gt.keys()):
if classname in pred:
rec[classname], prec[classname], ap[classname] = ret_values[i]
else:
rec[classname] = 0
prec[classname] = 0
ap[classname] = 0
print(classname, ap[classname])
return rec, prec, ap
| ContrastiveSceneContexts-main | downstream/votenet/datasets/evaluation/evaluate_object_detection_helper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Load Scannet scenes with vertices and ground truth labels
for semantic and instance segmentations
"""
# python imports
import math
import os, sys, argparse
import inspect
import json
import pdb
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
import scannet_utils
def read_aggregation(filename):
assert os.path.isfile(filename)
object_id_to_segs = {}
label_to_segs = {}
with open(filename) as f:
data = json.load(f)
num_objects = len(data['segGroups'])
for i in range(num_objects):
object_id = data['segGroups'][i]['objectId'] + 1 # instance ids should be 1-indexed
label = data['segGroups'][i]['label']
segs = data['segGroups'][i]['segments']
object_id_to_segs[object_id] = segs
if label in label_to_segs:
label_to_segs[label].extend(segs)
else:
label_to_segs[label] = segs
return object_id_to_segs, label_to_segs
def read_segmentation(filename):
assert os.path.isfile(filename)
seg_to_verts = {}
with open(filename) as f:
data = json.load(f)
num_verts = len(data['segIndices'])
for i in range(num_verts):
seg_id = data['segIndices'][i]
if seg_id in seg_to_verts:
seg_to_verts[seg_id].append(i)
else:
seg_to_verts[seg_id] = [i]
return seg_to_verts, num_verts
def export(mesh_file, agg_file, seg_file, meta_file, label_map_file, output_file=None):
""" points are XYZ RGB (RGB in 0-255),
semantic label as nyu40 ids,
instance label as 1-#instance,
box as (cx,cy,cz,dx,dy,dz,semantic_label)
"""
label_map = scannet_utils.read_label_mapping(label_map_file,
label_from='raw_category', label_to='nyu40id')
mesh_vertices = scannet_utils.read_mesh_vertices_rgb(mesh_file)
# Load scene axis alignment matrix
lines = open(meta_file).readlines()
for line in lines:
if 'axisAlignment' in line:
axis_align_matrix = [float(x) \
for x in line.rstrip().strip('axisAlignment = ').split(' ')]
break
axis_align_matrix = np.array(axis_align_matrix).reshape((4,4))
pts = np.ones((mesh_vertices.shape[0], 4))
pts[:,0:3] = mesh_vertices[:,0:3]
pts = np.dot(pts, axis_align_matrix.transpose()) # Nx4
mesh_vertices[:,0:3] = pts[:,0:3]
# Load semantic and instance labels
object_id_to_segs, label_to_segs = read_aggregation(agg_file)
seg_to_verts, num_verts = read_segmentation(seg_file)
label_ids = np.zeros(shape=(num_verts), dtype=np.uint32) # 0: unannotated
object_id_to_label_id = {}
for label, segs in label_to_segs.items():
label_id = label_map[label]
for seg in segs:
verts = seg_to_verts[seg]
label_ids[verts] = label_id
instance_ids = np.zeros(shape=(num_verts), dtype=np.uint32) # 0: unannotated
num_instances = len(np.unique(list(object_id_to_segs.keys())))
for object_id, segs in object_id_to_segs.items():
for seg in segs:
verts = seg_to_verts[seg]
instance_ids[verts] = object_id
if object_id not in object_id_to_label_id:
object_id_to_label_id[object_id] = label_ids[verts][0]
instance_bboxes = np.zeros((num_instances,7))
for obj_id in object_id_to_segs:
label_id = object_id_to_label_id[obj_id]
obj_pc = mesh_vertices[instance_ids==obj_id, 0:3]
if len(obj_pc) == 0: continue
# Compute axis aligned box
# An axis aligned bounding box is parameterized by
# (cx,cy,cz) and (dx,dy,dz) and label id
# where (cx,cy,cz) is the center point of the box,
# dx is the x-axis length of the box.
xmin = np.min(obj_pc[:,0])
ymin = np.min(obj_pc[:,1])
zmin = np.min(obj_pc[:,2])
xmax = np.max(obj_pc[:,0])
ymax = np.max(obj_pc[:,1])
zmax = np.max(obj_pc[:,2])
bbox = np.array([(xmin+xmax)/2, (ymin+ymax)/2, (zmin+zmax)/2,
xmax-xmin, ymax-ymin, zmax-zmin, label_id])
# NOTE: this assumes obj_id is in 1,2,3,.,,,.NUM_INSTANCES
instance_bboxes[obj_id-1,:] = bbox
if output_file is not None:
np.save(output_file+'_vert.npy', mesh_vertices)
np.save(output_file+'_sem_label.npy', label_ids)
np.save(output_file+'_ins_label.npy', instance_ids)
np.save(output_file+'_bbox.npy', instance_bboxes)
return mesh_vertices, label_ids, instance_ids,\
instance_bboxes, object_id_to_label_id
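# --- Illustrative only ----------------------------------------------------------
# Sketch of reading back the arrays written by export(); `prefix` is the same
# output_file prefix passed to export() (the example path is an assumption).
def _example_load_exported(prefix='scannet_train_detection_data/scene0000_00'):
    mesh_vertices = np.load(prefix + '_vert.npy')    # (N,6): XYZ + RGB in 0-255
    sem_labels = np.load(prefix + '_sem_label.npy')  # (N,): nyu40 ids, 0 = unannotated
    ins_labels = np.load(prefix + '_ins_label.npy')  # (N,): instance ids, 0 = unannotated
    bboxes = np.load(prefix + '_bbox.npy')           # (K,7): cx,cy,cz,dx,dy,dz,label_id
    return mesh_vertices, sem_labels, ins_labels, bboxes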
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--scan_path', required=True, help='path to scannet scene (e.g., data/ScanNet/v2/scene0000_00')
parser.add_argument('--output_file', required=True, help='output file')
parser.add_argument('--label_map_file', required=True, help='path to scannetv2-labels.combined.tsv')
opt = parser.parse_args()
scan_name = os.path.split(opt.scan_path)[-1]
mesh_file = os.path.join(opt.scan_path, scan_name + '_vh_clean_2.ply')
agg_file = os.path.join(opt.scan_path, scan_name + '.aggregation.json')
seg_file = os.path.join(opt.scan_path, scan_name + '_vh_clean_2.0.010000.segs.json')
meta_file = os.path.join(opt.scan_path, scan_name + '.txt') # includes axisAlignment info for the train set scans.
export(mesh_file, agg_file, seg_file, meta_file, opt.label_map_file, opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | downstream/votenet/datasets/scannet/load_scannet_data.py |
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Dataset for object bounding box regression.
An axis aligned bounding box is parameterized by (cx,cy,cz) and (dx,dy,dz)
where (cx,cy,cz) is the center point of the box, dx is the x-axis length of the box.
"""
import os
import sys
import torch
import numpy as np
from torch.utils.data import Dataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
from lib.utils import pc_util
from datasets.scannet.model_util_scannet import rotate_aligned_boxes, ScannetDatasetConfig
DC = ScannetDatasetConfig()
MAX_NUM_OBJ = 64
MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
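# --- Illustrative only ----------------------------------------------------------
# Small hedged helper relating the (cx,cy,cz,dx,dy,dz) parameterization used in
# this dataset to the (xmin,ymin,zmin,xmax,ymax,zmax) corner format used by the
# object detection evaluation scripts.
def _center_to_corners(bbox):
    center, dims = np.asarray(bbox[0:3]), np.asarray(bbox[3:6])
    return np.concatenate([center - dims / 2.0, center + dims / 2.0])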
class ScannetDetectionDataset(Dataset):
def __init__(self, split_set='train', num_points=20000,
use_color=False, use_height=False, augment=False, by_scenes=None, by_points=None):
self.data_path = os.path.join(BASE_DIR, 'scannet_train_detection_data')
all_scan_names = list(set([os.path.basename(x)[0:12] \
for x in os.listdir(self.data_path) if x.startswith('scene')]))
#if split_set=='all':
# self.scan_names = all_scan_names
if split_set in ['train', 'val', 'test']:
split_filenames = os.path.join(ROOT_DIR, 'scannet/meta_data',
'scannetv2_{}.txt'.format(split_set))
            if by_scenes is not None and split_set == 'train':
split_filenames = by_scenes
self.sampled_bbox = {}
            if by_points is not None and split_set == 'train':
self.sampled_bbox = torch.load(by_points)
with open(split_filenames, 'r') as f:
self.scan_names = f.read().splitlines()
            # remove unavailable scans
num_scans = len(self.scan_names)
self.scan_names = [sname for sname in self.scan_names if sname in all_scan_names]
print('kept {} scans out of {}'.format(len(self.scan_names), num_scans))
num_scans = len(self.scan_names)
else:
print('illegal split name')
return
self.num_points = num_points
self.use_color = use_color
self.use_height = use_height
self.augment = augment
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
"""
Returns a dict with following keys:
point_clouds: (N,3+C)
center_label: (MAX_NUM_OBJ,3) for GT box center XYZ
sem_cls_label: (MAX_NUM_OBJ,) semantic class index
            heading_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1
            heading_residual_label: (MAX_NUM_OBJ,)
            size_class_label: (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER
            size_residual_label: (MAX_NUM_OBJ,3)
            box_label_mask: (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box
            vote_label: (N,9) with three identical copies of the vote XYZ
            vote_label_mask: (N,) with 0/1 with 1 indicating the point is in one of the object's OBB.
scan_idx: int scan index in scan_names list
pcl_color: unused
"""
scan_name = self.scan_names[idx]
mesh_vertices = np.load(os.path.join(self.data_path, scan_name)+'_vert.npy')
if os.path.exists(os.path.join(self.data_path, scan_name)+'_ins_label.npy'):
instance_labels = np.load(os.path.join(self.data_path, scan_name)+'_ins_label.npy')
semantic_labels = np.load(os.path.join(self.data_path, scan_name)+'_sem_label.npy')
instance_bboxes = np.load(os.path.join(self.data_path, scan_name)+'_bbox.npy')
else:
instance_labels = np.ones(mesh_vertices.shape[0])
semantic_labels = np.ones(mesh_vertices.shape[0])
instance_bboxes = np.ones((12,7)) + 2
#from lib.utils.io3d import generate_bbox_mesh, write_triangle_mesh
#new_instance_box = np.zeros_like(instance_bboxes)
#new_instance_box[:, 0] = instance_bboxes[:, 0] - instance_bboxes[:, 3] / 2.0
#new_instance_box[:, 1] = instance_bboxes[:, 1] - instance_bboxes[:, 4] / 2.0
#new_instance_box[:, 2] = instance_bboxes[:, 2] - instance_bboxes[:, 5] / 2.0
#new_instance_box[:, 3] = instance_bboxes[:, 0] + instance_bboxes[:, 3] / 2.0
#new_instance_box[:, 4] = instance_bboxes[:, 1] + instance_bboxes[:, 4] / 2.0
#new_instance_box[:, 5] = instance_bboxes[:, 2] + instance_bboxes[:, 5] / 2.0
#import ipdb
#ipdb.set_trace()
#vertices, _, faces = generate_bbox_mesh(new_instance_box)
#write_triangle_mesh(vertices, None, faces, 'test1.ply')
if self.sampled_bbox and scan_name in self.sampled_bbox:
sampled_bbox = self.sampled_bbox[scan_name][0]
sampled_instances = self.sampled_bbox[scan_name][1]
            mask_valid = np.zeros_like(instance_labels).astype(bool)
for sampled_instance in sampled_instances:
mask_valid = mask_valid | (instance_labels == sampled_instance)
mask_nonvalid = ~mask_valid
semantic_labels[mask_nonvalid] = -1
instance_labels[mask_nonvalid] = -1
if len(instance_bboxes) != 0:
instance_bboxes = instance_bboxes[sampled_bbox]
# subsampling happens here
point_cloud = mesh_vertices[:,0:3] # do not use color for now
pcl_color = (mesh_vertices[:,3:6]-MEAN_COLOR_RGB)/256.0
#pcl_color = np.ones_like(mesh_vertices[:,3:6])
if self.use_height:
floor_height = np.percentile(point_cloud[:,2],0.99)
height = point_cloud[:,2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)],1)
# ------------------------------- LABELS ------------------------------
target_bboxes = np.zeros((MAX_NUM_OBJ, 6))
target_bboxes_mask = np.zeros((MAX_NUM_OBJ))
angle_classes = np.zeros((MAX_NUM_OBJ,))
angle_residuals = np.zeros((MAX_NUM_OBJ,))
size_classes = np.zeros((MAX_NUM_OBJ,))
size_residuals = np.zeros((MAX_NUM_OBJ, 3))
point_cloud, choices = pc_util.random_sampling(point_cloud,
self.num_points, return_choices=True)
instance_labels = instance_labels[choices]
semantic_labels = semantic_labels[choices]
pcl_color = pcl_color[choices]
target_bboxes_mask[0:instance_bboxes.shape[0]] = 1
target_bboxes[0:instance_bboxes.shape[0],:] = instance_bboxes[:,0:6]
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:,0] = -1 * point_cloud[:,0]
target_bboxes[:,0] = -1 * target_bboxes[:,0]
if np.random.random() > 0.5:
# Flipping along the XZ plane
point_cloud[:,1] = -1 * point_cloud[:,1]
target_bboxes[:,1] = -1 * target_bboxes[:,1]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random()*np.pi/18) - np.pi/36 # -5 ~ +5 degree
rot_mat = pc_util.rotz(rot_angle)
point_cloud[:,0:3] = np.dot(point_cloud[:,0:3], np.transpose(rot_mat))
target_bboxes = rotate_aligned_boxes(target_bboxes, rot_mat)
        # Generate votes (computed *after* augmentation).
        # Note: since there is no map between bbox instance labels and point
        # instance_labels (it was filtered out in the data preparation step),
        # we compute each instance's box center from the points sharing the
        # same instance label.
point_votes = np.zeros([self.num_points, 3])
point_votes_mask = np.zeros(self.num_points)
for i_instance in np.unique(instance_labels):
# find all points belong to that instance
ind = np.where(instance_labels == i_instance)[0]
# find the semantic label
if semantic_labels[ind[0]] in DC.nyu40ids:
x = point_cloud[ind,:3]
center = 0.5*(x.min(0) + x.max(0))
point_votes[ind, :] = center - x
point_votes_mask[ind] = 1.0
point_votes = np.tile(point_votes, (1, 3)) # make 3 votes identical
class_ind = [np.where(DC.nyu40ids == x)[0][0] for x in instance_bboxes[:,-1]]
        # NOTE: set size class as semantic class. Consider using size2class instead.
size_classes[0:instance_bboxes.shape[0]] = class_ind
size_residuals[0:instance_bboxes.shape[0], :] = \
target_bboxes[0:instance_bboxes.shape[0], 3:6] - DC.mean_size_arr[class_ind,:]
ret_dict = {}
ret_dict['point_clouds'] = point_cloud.astype(np.float32)
ret_dict['center_label'] = target_bboxes.astype(np.float32)[:,0:3]
ret_dict['heading_class_label'] = angle_classes.astype(np.int64)
ret_dict['heading_residual_label'] = angle_residuals.astype(np.float32)
ret_dict['size_class_label'] = size_classes.astype(np.int64)
ret_dict['size_residual_label'] = size_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0:instance_bboxes.shape[0]] = \
[DC.nyu40id2class[x] for x in instance_bboxes[:,-1][0:instance_bboxes.shape[0]]]
ret_dict['sem_cls_label'] = target_bboxes_semcls.astype(np.int64)
ret_dict['box_label_mask'] = target_bboxes_mask.astype(np.float32)
ret_dict['vote_label'] = point_votes.astype(np.float32)
ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
ret_dict['scan_idx'] = np.array(idx).astype(np.int64)
ret_dict['pcl_color'] = pcl_color
ret_dict['scan_name'] = scan_name
return ret_dict
############# Visualizaion ########
def viz_votes(pc, point_votes, point_votes_mask, name=''):
""" Visualize point votes and point votes mask labels
pc: (N,3 or 6), point_votes: (N,9), point_votes_mask: (N,)
"""
inds = (point_votes_mask==1)
pc_obj = pc[inds,0:3]
pc_obj_voted1 = pc_obj + point_votes[inds,0:3]
pc_util.write_ply(pc_obj, 'pc_obj{}.ply'.format(name))
pc_util.write_ply(pc_obj_voted1, 'pc_obj_voted1{}.ply'.format(name))
def viz_obb(pc, label, mask, angle_classes, angle_residuals,
size_classes, size_residuals, name=''):
""" Visualize oriented bounding box ground truth
pc: (N,3)
label: (K,3) K == MAX_NUM_OBJ
mask: (K,)
angle_classes: (K,)
angle_residuals: (K,)
size_classes: (K,)
size_residuals: (K,3)
"""
oriented_boxes = []
K = label.shape[0]
for i in range(K):
if mask[i] == 0: continue
obb = np.zeros(7)
obb[0:3] = label[i,0:3]
heading_angle = 0 # hard code to 0
box_size = DC.mean_size_arr[size_classes[i], :] + size_residuals[i, :]
obb[3:6] = box_size
obb[6] = -1 * heading_angle
print(obb)
oriented_boxes.append(obb)
pc_util.write_oriented_bbox(oriented_boxes, 'gt_obbs{}.ply'.format(name))
pc_util.write_ply(label[mask==1,:], 'gt_centroids{}.ply'.format(name))
if __name__=='__main__':
dset = ScannetDetectionDataset(use_height=True, num_points=40000)
for i_example in range(4):
example = dset.__getitem__(1)
pc_util.write_ply(example['point_clouds'], 'pc_{}.ply'.format(i_example))
viz_votes(example['point_clouds'], example['vote_label'],
example['vote_label_mask'],name=i_example)
viz_obb(pc=example['point_clouds'], label=example['center_label'],
mask=example['box_label_mask'],
angle_classes=None, angle_residuals=None,
size_classes=example['size_class_label'], size_residuals=example['size_residual_label'],
name=i_example)
| ContrastiveSceneContexts-main | downstream/votenet/datasets/scannet/scannet_detection_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Batch mode in loading Scannet scenes with vertices and ground truth labels
for semantic and instance segmentations
Usage example: python ./batch_load_scannet_data.py
"""
import os
import sys
import datetime
import numpy as np
from load_scannet_data import export
import pdb
SCANNET_DIR = 'scans'
TRAIN_SCAN_NAMES = [line.rstrip() for line in open('meta_data/scannet_train.txt')]
LABEL_MAP_FILE = 'meta_data/scannetv2-labels.combined.tsv'
DONOTCARE_CLASS_IDS = np.array([])
OBJ_CLASS_IDS = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])
MAX_NUM_POINT = 50000
OUTPUT_FOLDER = './scannet_train_detection_data'
def export_one_scan(scan_name, output_filename_prefix):
mesh_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '_vh_clean_2.ply')
agg_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '.aggregation.json')
seg_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '_vh_clean_2.0.010000.segs.json')
meta_file = os.path.join(SCANNET_DIR, scan_name, scan_name + '.txt') # includes axisAlignment info for the train set scans.
mesh_vertices, semantic_labels, instance_labels, instance_bboxes, instance2semantic = \
export(mesh_file, agg_file, seg_file, meta_file, LABEL_MAP_FILE, None)
mask = np.logical_not(np.in1d(semantic_labels, DONOTCARE_CLASS_IDS))
mesh_vertices = mesh_vertices[mask,:]
semantic_labels = semantic_labels[mask]
instance_labels = instance_labels[mask]
num_instances = len(np.unique(instance_labels))
print('Num of instances: ', num_instances)
bbox_mask = np.in1d(instance_bboxes[:,-1], OBJ_CLASS_IDS)
instance_bboxes = instance_bboxes[bbox_mask,:]
print('Num of care instances: ', instance_bboxes.shape[0])
N = mesh_vertices.shape[0]
if N > MAX_NUM_POINT:
choices = np.random.choice(N, MAX_NUM_POINT, replace=False)
mesh_vertices = mesh_vertices[choices, :]
semantic_labels = semantic_labels[choices]
instance_labels = instance_labels[choices]
np.save(output_filename_prefix+'_vert.npy', mesh_vertices)
np.save(output_filename_prefix+'_sem_label.npy', semantic_labels)
np.save(output_filename_prefix+'_ins_label.npy', instance_labels)
np.save(output_filename_prefix+'_bbox.npy', instance_bboxes)
def batch_export():
if not os.path.exists(OUTPUT_FOLDER):
print('Creating new data folder: {}'.format(OUTPUT_FOLDER))
os.mkdir(OUTPUT_FOLDER)
for scan_name in TRAIN_SCAN_NAMES:
print('-'*20+'begin')
print(datetime.datetime.now())
print(scan_name)
output_filename_prefix = os.path.join(OUTPUT_FOLDER, scan_name)
if os.path.isfile(output_filename_prefix+'_vert.npy'):
print('File already exists. skipping.')
print('-'*20+'done')
continue
try:
export_one_scan(scan_name, output_filename_prefix)
        except Exception as e:
            print('Failed to export scan: %s (%s)' % (scan_name, e))
print('-'*20+'done')
if __name__=='__main__':
batch_export()
| ContrastiveSceneContexts-main | downstream/votenet/datasets/scannet/batch_load_scannet_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from box_util import get_3d_box
class ScannetDatasetConfig(object):
def __init__(self):
self.num_class = 18
self.num_heading_bin = 1
self.num_size_cluster = 18
self.type2class = {'cabinet':0, 'bed':1, 'chair':2, 'sofa':3, 'table':4, 'door':5,
'window':6,'bookshelf':7,'picture':8, 'counter':9, 'desk':10, 'curtain':11,
'refrigerator':12, 'showercurtrain':13, 'toilet':14, 'sink':15, 'bathtub':16, 'garbagebin':17}
self.class2type = {self.type2class[t]:t for t in self.type2class}
self.nyu40ids = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])
self.nyu40id2class = {nyu40id: i for i,nyu40id in enumerate(list(self.nyu40ids))}
self.mean_size_arr = np.load(os.path.join(ROOT_DIR,'scannet/meta_data/scannet_means.npz'))['arr_0']
self.type_mean_size = {}
for i in range(self.num_size_cluster):
self.type_mean_size[self.class2type[i]] = self.mean_size_arr[i,:]
def angle2class(self, angle):
''' Convert continuous angle to discrete class
            [optional] also returns a small regression number from
            class center angle to current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
return is class of int32 of 0,1,...,N-1 and a number such that
class*(2pi/N) + number = angle
NOT USED.
'''
assert(False)
def class2angle(self, pred_cls, residual, to_label_format=True):
''' Inverse function to angle2class.
        As ScanNet only has axis-aligned boxes, angles are always 0. '''
return 0
def size2class(self, size, type_name):
''' Convert 3D box size (l,w,h) to size class and size residual '''
size_class = self.type2class[type_name]
size_residual = size - self.type_mean_size[type_name]
return size_class, size_residual
def class2size(self, pred_cls, residual):
''' Inverse function to size2class '''
return self.mean_size_arr[pred_cls, :] + residual
def param2obb(self, center, heading_class, heading_residual, size_class, size_residual):
heading_angle = self.class2angle(heading_class, heading_residual)
box_size = self.class2size(int(size_class), size_residual)
obb = np.zeros((7,))
obb[0:3] = center
obb[3:6] = box_size
obb[6] = heading_angle*-1
return obb
def rotate_aligned_boxes(input_boxes, rot_mat):
centers, lengths = input_boxes[:,0:3], input_boxes[:,3:6]
new_centers = np.dot(centers, np.transpose(rot_mat))
dx, dy = lengths[:,0]/2.0, lengths[:,1]/2.0
new_x = np.zeros((dx.shape[0], 4))
new_y = np.zeros((dx.shape[0], 4))
for i, crnr in enumerate([(-1,-1), (1, -1), (1, 1), (-1, 1)]):
crnrs = np.zeros((dx.shape[0], 3))
crnrs[:,0] = crnr[0]*dx
crnrs[:,1] = crnr[1]*dy
crnrs = np.dot(crnrs, np.transpose(rot_mat))
new_x[:,i] = crnrs[:,0]
new_y[:,i] = crnrs[:,1]
new_dx = 2.0*np.max(new_x, 1)
new_dy = 2.0*np.max(new_y, 1)
new_lengths = np.stack((new_dx, new_dy, lengths[:,2]), axis=1)
return np.concatenate([new_centers, new_lengths], axis=1)
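# Illustrative sketch, not part of the original file: it exercises the size <-> (class,
# residual) conversion of ScannetDatasetConfig and re-fits two axis-aligned boxes after a
# rotation with rotate_aligned_boxes. It assumes the meta_data files referenced above are
# available on disk; the box numbers below are made up.
if __name__ == '__main__':
    DC = ScannetDatasetConfig()
    size = np.array([0.6, 0.6, 0.9])  # a hypothetical chair-sized box
    size_class, size_residual = DC.size2class(size, 'chair')
    assert np.allclose(DC.class2size(size_class, size_residual), size)
    # two axis-aligned boxes as (cx, cy, cz, dx, dy, dz), rotated by 30 degrees around z
    boxes = np.array([[0., 0., 0., 1., 2., 1.], [2., 1., 0., 0.5, 0.5, 1.]])
    angle = np.pi / 6
    rot_mat = np.array([[np.cos(angle), -np.sin(angle), 0.],
                        [np.sin(angle),  np.cos(angle), 0.],
                        [0., 0., 1.]])
    print(rotate_aligned_boxes(boxes, rot_mat))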
| ContrastiveSceneContexts-main | downstream/votenet/datasets/scannet/model_util_scannet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
import numpy as np
import pc_util
scene_name = 'scannet_train_detection_data/scene0002_00'
output_folder = 'data_viz_dump'
data = np.load(scene_name+'_vert.npy')
scene_points = data[:,0:3]
colors = data[:,3:]
instance_labels = np.load(scene_name+'_ins_label.npy')
semantic_labels = np.load(scene_name+'_sem_label.npy')
instance_bboxes = np.load(scene_name+'_bbox.npy')
print(np.unique(instance_labels))
print(np.unique(semantic_labels))
input()
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Write scene as OBJ file for visualization
pc_util.write_ply_rgb(scene_points, colors, os.path.join(output_folder, 'scene.obj'))
pc_util.write_ply_color(scene_points, instance_labels, os.path.join(output_folder, 'scene_instance.obj'))
pc_util.write_ply_color(scene_points, semantic_labels, os.path.join(output_folder, 'scene_semantic.obj'))
from model_util_scannet import ScannetDatasetConfig
DC = ScannetDatasetConfig()
print(instance_bboxes.shape)
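# Illustrative continuation, not part of the original file: ScanNet ground-truth boxes are
# axis-aligned, so they can be dumped with a zero heading angle. The first six columns of
# instance_bboxes are (cx, cy, cz, dx, dy, dz); the last column is the semantic label.
gt_obbs = np.zeros((instance_bboxes.shape[0], 7))
gt_obbs[:, 0:6] = instance_bboxes[:, 0:6]
pc_util.write_oriented_bbox(gt_obbs, os.path.join(output_folder, 'gt_obbs.ply'))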
| ContrastiveSceneContexts-main | downstream/votenet/datasets/scannet/data_viz.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Ref: https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts '''
import os
import sys
import json
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
def represents_int(s):
    ''' Return True if string s represents an int. '''
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
if represents_int(list(mapping.keys())[0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
def read_mesh_vertices(filename):
""" read XYZ for each vertex.
"""
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
def read_mesh_vertices_rgb(filename):
""" read XYZ RGB for each vertex.
Note: RGB values are in 0-255
"""
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
vertices[:,3] = plydata['vertex'].data['red']
vertices[:,4] = plydata['vertex'].data['green']
vertices[:,5] = plydata['vertex'].data['blue']
return vertices
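# Illustrative usage sketch, not part of the original file. The paths below are hypothetical
# placeholders for a ScanNet checkout laid out as in batch_load_scannet_data.py.
if __name__ == '__main__':
    label_map = read_label_mapping('meta_data/scannetv2-labels.combined.tsv',
                                   label_from='raw_category', label_to='nyu40id')
    print('number of raw categories:', len(label_map))
    verts = read_mesh_vertices_rgb('scans/scene0000_00/scene0000_00_vh_clean_2.ply')
    print('vertices (x,y,z,r,g,b):', verts.shape)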
| ContrastiveSceneContexts-main | downstream/votenet/datasets/scannet/scannet_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
import sys
import os
from lib.utils.nn_distance import nn_distance, huber_loss
FAR_THRESHOLD = 0.6
NEAR_THRESHOLD = 0.3
GT_VOTE_FACTOR = 3 # number of GT votes per point
OBJECTNESS_CLS_WEIGHTS = [0.2,0.8] # put larger weights on positive objectness
def compute_vote_loss(end_points):
""" Compute vote loss: Match predicted votes to GT votes.
Args:
end_points: dict (read-only)
Returns:
vote_loss: scalar Tensor
Overall idea:
If the seed point belongs to an object (votes_label_mask == 1),
then we require it to vote for the object center.
Each seed point may vote for multiple translations v1,v2,v3
A seed point may also be in the boxes of multiple objects:
o1,o2,o3 with corresponding GT votes c1,c2,c3
Then the loss for this seed point is:
min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3
"""
# Load ground truth votes and assign them to seed points
batch_size = end_points['seed_xyz'].shape[0]
num_seed = end_points['seed_xyz'].shape[1] # B,num_seed,3
vote_xyz = end_points['vote_xyz'] # B,num_seed*vote_factor,3
seed_inds = end_points['seed_inds'].long() # B,num_seed in [0,num_points-1]
# Get groundtruth votes for the seed points
# vote_label_mask: Use gather to select B,num_seed from B,num_point
# non-object point has no GT vote mask = 0, object point has mask = 1
# vote_label: Use gather to select B,num_seed,9 from B,num_point,9
# with inds in shape B,num_seed,9 and 9 = GT_VOTE_FACTOR * 3
seed_gt_votes_mask = torch.gather(end_points['vote_label_mask'], 1, seed_inds)
seed_inds_expand = seed_inds.view(batch_size,num_seed,1).repeat(1,1,3*GT_VOTE_FACTOR)
seed_gt_votes = torch.gather(end_points['vote_label'], 1, seed_inds_expand)
seed_gt_votes += end_points['seed_xyz'].repeat(1,1,3)
# Compute the min of min of distance
vote_xyz_reshape = vote_xyz.view(batch_size*num_seed, -1, 3) # from B,num_seed*vote_factor,3 to B*num_seed,vote_factor,3
seed_gt_votes_reshape = seed_gt_votes.view(batch_size*num_seed, GT_VOTE_FACTOR, 3) # from B,num_seed,3*GT_VOTE_FACTOR to B*num_seed,GT_VOTE_FACTOR,3
# A predicted vote to no where is not penalized as long as there is a good vote near the GT vote.
dist1, _, dist2, _ = nn_distance(vote_xyz_reshape, seed_gt_votes_reshape, l1=True)
votes_dist, _ = torch.min(dist2, dim=1) # (B*num_seed,vote_factor) to (B*num_seed,)
votes_dist = votes_dist.view(batch_size, num_seed)
vote_loss = torch.sum(votes_dist*seed_gt_votes_mask.float())/(torch.sum(seed_gt_votes_mask.float())+1e-6)
return vote_loss
def compute_objectness_loss(end_points):
""" Compute objectness loss for the proposals.
Args:
end_points: dict (read-only)
Returns:
objectness_loss: scalar Tensor
objectness_label: (batch_size, num_seed) Tensor with value 0 or 1
objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1
object_assignment: (batch_size, num_seed) Tensor with long int
within [0,num_gt_object-1]
"""
# Associate proposal and GT objects by point-to-point distances
aggregated_vote_xyz = end_points['aggregated_vote_xyz']
gt_center = end_points['center_label'][:,:,0:3]
B = gt_center.shape[0]
K = aggregated_vote_xyz.shape[1]
K2 = gt_center.shape[1]
dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, gt_center) # dist1: BxK, dist2: BxK2
# Generate objectness label and mask
# objectness_label: 1 if pred object center is within NEAR_THRESHOLD of any GT object
# objectness_mask: 0 if pred object center is in gray zone (DONOTCARE), 1 otherwise
euclidean_dist1 = torch.sqrt(dist1+1e-6)
objectness_label = torch.zeros((B,K), dtype=torch.long).cuda()
objectness_mask = torch.zeros((B,K)).cuda()
objectness_label[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1<NEAR_THRESHOLD] = 1
objectness_mask[euclidean_dist1>FAR_THRESHOLD] = 1
# Compute objectness loss
objectness_scores = end_points['objectness_scores']
criterion = nn.CrossEntropyLoss(torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none')
objectness_loss = criterion(objectness_scores.transpose(2,1), objectness_label)
objectness_loss = torch.sum(objectness_loss * objectness_mask)/(torch.sum(objectness_mask)+1e-6)
# Set assignment
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
return objectness_loss, objectness_label, objectness_mask, object_assignment
def compute_box_and_sem_cls_loss(end_points, config):
""" Compute 3D bounding box and semantic classification loss.
Args:
end_points: dict (read-only)
Returns:
center_loss
heading_cls_loss
heading_reg_loss
size_cls_loss
size_reg_loss
sem_cls_loss
"""
num_heading_bin = config.num_heading_bin
num_size_cluster = config.num_size_cluster
num_class = config.num_class
mean_size_arr = config.mean_size_arr
object_assignment = end_points['object_assignment']
batch_size = object_assignment.shape[0]
# Compute center loss
pred_center = end_points['center']
gt_center = end_points['center_label'][:,:,0:3]
dist1, ind1, dist2, _ = nn_distance(pred_center, gt_center) # dist1: BxK, dist2: BxK2
box_label_mask = end_points['box_label_mask']
objectness_label = end_points['objectness_label'].float()
centroid_reg_loss1 = \
torch.sum(dist1*objectness_label)/(torch.sum(objectness_label)+1e-6)
centroid_reg_loss2 = \
torch.sum(dist2*box_label_mask)/(torch.sum(box_label_mask)+1e-6)
center_loss = centroid_reg_loss1 + centroid_reg_loss2
# Compute heading loss
heading_class_label = torch.gather(end_points['heading_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_heading_class = nn.CrossEntropyLoss(reduction='none')
heading_class_loss = criterion_heading_class(end_points['heading_scores'].transpose(2,1), heading_class_label) # (B,K)
heading_class_loss = torch.sum(heading_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
heading_residual_label = torch.gather(end_points['heading_residual_label'], 1, object_assignment) # select (B,K) from (B,K2)
heading_residual_normalized_label = heading_residual_label / (np.pi/num_heading_bin)
# Ref: https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/3
heading_label_one_hot = torch.cuda.FloatTensor(batch_size, heading_class_label.shape[1], num_heading_bin).zero_()
heading_label_one_hot.scatter_(2, heading_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_heading_bin)
heading_residual_normalized_loss = huber_loss(torch.sum(end_points['heading_residuals_normalized']*heading_label_one_hot, -1) - heading_residual_normalized_label, delta=1.0) # (B,K)
heading_residual_normalized_loss = torch.sum(heading_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
# Compute size loss
size_class_label = torch.gather(end_points['size_class_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_size_class = nn.CrossEntropyLoss(reduction='none')
size_class_loss = criterion_size_class(end_points['size_scores'].transpose(2,1), size_class_label) # (B,K)
size_class_loss = torch.sum(size_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
size_residual_label = torch.gather(end_points['size_residual_label'], 1, object_assignment.unsqueeze(-1).repeat(1,1,3)) # select (B,K,3) from (B,K2,3)
size_label_one_hot = torch.cuda.FloatTensor(batch_size, size_class_label.shape[1], num_size_cluster).zero_()
size_label_one_hot.scatter_(2, size_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_size_cluster)
size_label_one_hot_tiled = size_label_one_hot.unsqueeze(-1).repeat(1,1,1,3) # (B,K,num_size_cluster,3)
predicted_size_residual_normalized = torch.sum(end_points['size_residuals_normalized']*size_label_one_hot_tiled, 2) # (B,K,3)
mean_size_arr_expanded = torch.from_numpy(mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0) # (1,1,num_size_cluster,3)
mean_size_label = torch.sum(size_label_one_hot_tiled * mean_size_arr_expanded, 2) # (B,K,3)
size_residual_label_normalized = size_residual_label / mean_size_label # (B,K,3)
size_residual_normalized_loss = torch.mean(huber_loss(predicted_size_residual_normalized - size_residual_label_normalized, delta=1.0), -1) # (B,K,3) -> (B,K)
size_residual_normalized_loss = torch.sum(size_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6)
    # Compute semantic cls loss
sem_cls_label = torch.gather(end_points['sem_cls_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_sem_cls = nn.CrossEntropyLoss(reduction='none')
sem_cls_loss = criterion_sem_cls(end_points['sem_cls_scores'].transpose(2,1), sem_cls_label) # (B,K)
sem_cls_loss = torch.sum(sem_cls_loss * objectness_label)/(torch.sum(objectness_label)+1e-6)
return center_loss, heading_class_loss, heading_residual_normalized_loss, size_class_loss, size_residual_normalized_loss, sem_cls_loss
def get_loss(end_points, config):
""" Loss functions
Args:
end_points: dict
{
seed_xyz, seed_inds, vote_xyz,
center,
heading_scores, heading_residuals_normalized,
size_scores, size_residuals_normalized,
sem_cls_scores, #seed_logits,#
center_label,
heading_class_label, heading_residual_label,
size_class_label, size_residual_label,
sem_cls_label,
box_label_mask,
vote_label, vote_label_mask
}
config: dataset config instance
Returns:
loss: pytorch scalar tensor
end_points: dict
"""
# Vote loss
vote_loss = compute_vote_loss(end_points)
end_points['vote_loss'] = vote_loss
# Obj loss
objectness_loss, objectness_label, objectness_mask, object_assignment = \
compute_objectness_loss(end_points)
end_points['objectness_loss'] = objectness_loss
end_points['objectness_label'] = objectness_label
end_points['objectness_mask'] = objectness_mask
end_points['object_assignment'] = object_assignment
total_num_proposal = objectness_label.shape[0]*objectness_label.shape[1]
end_points['pos_ratio'] = \
torch.sum(objectness_label.float().cuda())/float(total_num_proposal)
end_points['neg_ratio'] = \
torch.sum(objectness_mask.float())/float(total_num_proposal) - end_points['pos_ratio']
# Box loss and sem cls loss
center_loss, heading_cls_loss, heading_reg_loss, size_cls_loss, size_reg_loss, sem_cls_loss = \
compute_box_and_sem_cls_loss(end_points, config)
end_points['center_loss'] = center_loss
end_points['heading_cls_loss'] = heading_cls_loss
end_points['heading_reg_loss'] = heading_reg_loss
end_points['size_cls_loss'] = size_cls_loss
end_points['size_reg_loss'] = size_reg_loss
end_points['sem_cls_loss'] = sem_cls_loss
box_loss = center_loss + 0.1*heading_cls_loss + heading_reg_loss + 0.1*size_cls_loss + size_reg_loss
end_points['box_loss'] = box_loss
# Final loss function
loss = vote_loss + 0.5*objectness_loss + box_loss + 0.1*sem_cls_loss
loss *= 10
end_points['loss'] = loss
# --------------------------------------------
# Some other statistics
obj_pred_val = torch.argmax(end_points['objectness_scores'], 2) # B,K
obj_acc = torch.sum((obj_pred_val==objectness_label.long()).float()*objectness_mask)/(torch.sum(objectness_mask)+1e-6)
end_points['obj_acc'] = obj_acc
return loss, end_points
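# Illustrative sketch, not part of the original file, of how get_loss combines its
# components; the scalar values below are made up. box_loss down-weights the two
# classification terms, and the total is scaled by 10, exactly as in get_loss above.
if __name__ == '__main__':
    vote_loss, objectness_loss, sem_cls_loss = 0.30, 0.20, 0.50
    center_loss, heading_cls_loss, heading_reg_loss = 0.10, 0.40, 0.05
    size_cls_loss, size_reg_loss = 0.35, 0.08
    box_loss = center_loss + 0.1*heading_cls_loss + heading_reg_loss \
        + 0.1*size_cls_loss + size_reg_loss
    loss = 10 * (vote_loss + 0.5*objectness_loss + box_loss + 0.1*sem_cls_loss)
    print('box_loss = %.3f, total loss = %.3f' % (box_loss, loss))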
| ContrastiveSceneContexts-main | downstream/votenet/models/loss_helper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import os
import sys
from lib.utils import pc_util
DUMP_CONF_THRESH = 0.5 # Dump boxes with obj prob larger than that.
def softmax(x):
''' Numpy function for softmax'''
shape = x.shape
probs = np.exp(x - np.max(x, axis=len(shape)-1, keepdims=True))
probs /= np.sum(probs, axis=len(shape)-1, keepdims=True)
return probs
def dump_results(end_points, dump_dir, config, inference_switch=False):
''' Dump results.
Args:
end_points: dict
{..., pred_mask}
pred_mask is a binary mask array of size (batch_size, num_proposal) computed by running NMS and empty box removal
Returns:
None
'''
if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)
# INPUT
point_clouds = end_points['point_clouds'].cpu().numpy()
batch_size = point_clouds.shape[0]
# NETWORK OUTPUTS
seed_xyz = end_points['seed_xyz'].detach().cpu().numpy() # (B,num_seed,3)
if 'vote_xyz' in end_points:
aggregated_vote_xyz = end_points['aggregated_vote_xyz'].detach().cpu().numpy()
vote_xyz = end_points['vote_xyz'].detach().cpu().numpy() # (B,num_seed,3)
objectness_scores = end_points['objectness_scores'].detach().cpu().numpy() # (B,K,2)
pred_center = end_points['center'].detach().cpu().numpy() # (B,K,3)
pred_heading_class = torch.argmax(end_points['heading_scores'], -1) # B,num_proposal
pred_heading_residual = torch.gather(end_points['heading_residuals'], 2, pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_class = pred_heading_class.detach().cpu().numpy() # B,num_proposal
pred_heading_residual = pred_heading_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal
pred_size_class = torch.argmax(end_points['size_scores'], -1) # B,num_proposal
pred_size_residual = torch.gather(end_points['size_residuals'], 2, pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,3)) # B,num_proposal,1,3
pred_size_residual = pred_size_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal,3
# OTHERS
pred_mask = end_points['pred_mask'] # B,num_proposal
idx_beg = 0
for i in range(batch_size):
pc = point_clouds[i,:,:]
objectness_prob = softmax(objectness_scores[i,:,:])[:,1] # (K,)
# Dump various point clouds
pc_util.write_ply(pc, os.path.join(dump_dir, '%06d_pc.ply'%(idx_beg+i)))
pc_util.write_ply(seed_xyz[i,:,:], os.path.join(dump_dir, '%06d_seed_pc.ply'%(idx_beg+i)))
if 'vote_xyz' in end_points:
            pc_util.write_ply(vote_xyz[i,:,:], os.path.join(dump_dir, '%06d_vgen_pc.ply'%(idx_beg+i)))
pc_util.write_ply(aggregated_vote_xyz[i,:,:], os.path.join(dump_dir, '%06d_aggregated_vote_pc.ply'%(idx_beg+i)))
pc_util.write_ply(pred_center[i,:,0:3], os.path.join(dump_dir, '%06d_proposal_pc.ply'%(idx_beg+i)))
if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
pc_util.write_ply(pred_center[i,objectness_prob>DUMP_CONF_THRESH,0:3], os.path.join(dump_dir, '%06d_confident_proposal_pc.ply'%(idx_beg+i)))
# Dump predicted bounding boxes
if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
num_proposal = pred_center.shape[1]
obbs = []
for j in range(num_proposal):
obb = config.param2obb(pred_center[i,j,0:3], pred_heading_class[i,j], pred_heading_residual[i,j],
pred_size_class[i,j], pred_size_residual[i,j])
obbs.append(obb)
if len(obbs)>0:
obbs = np.vstack(tuple(obbs)) # (num_proposal, 7)
pc_util.write_oriented_bbox(obbs[objectness_prob>DUMP_CONF_THRESH,:], os.path.join(dump_dir, '%06d_pred_confident_bbox.ply'%(idx_beg+i)))
pc_util.write_oriented_bbox(obbs[np.logical_and(objectness_prob>DUMP_CONF_THRESH, pred_mask[i,:]==1),:], os.path.join(dump_dir, '%06d_pred_confident_nms_bbox.ply'%(idx_beg+i)))
pc_util.write_oriented_bbox(obbs[pred_mask[i,:]==1,:], os.path.join(dump_dir, '%06d_pred_nms_bbox.ply'%(idx_beg+i)))
pc_util.write_oriented_bbox(obbs, os.path.join(dump_dir, '%06d_pred_bbox.ply'%(idx_beg+i)))
# Return if it is at inference time. No dumping of groundtruths
if inference_switch:
return
# LABELS
gt_center = end_points['center_label'].cpu().numpy() # (B,MAX_NUM_OBJ,3)
gt_mask = end_points['box_label_mask'].cpu().numpy() # B,K2
gt_heading_class = end_points['heading_class_label'].cpu().numpy() # B,K2
gt_heading_residual = end_points['heading_residual_label'].cpu().numpy() # B,K2
gt_size_class = end_points['size_class_label'].cpu().numpy() # B,K2
gt_size_residual = end_points['size_residual_label'].cpu().numpy() # B,K2,3
objectness_label = end_points['objectness_label'].detach().cpu().numpy() # (B,K,)
objectness_mask = end_points['objectness_mask'].detach().cpu().numpy() # (B,K,)
for i in range(batch_size):
if np.sum(objectness_label[i,:])>0:
pc_util.write_ply(pred_center[i,objectness_label[i,:]>0,0:3], os.path.join(dump_dir, '%06d_gt_positive_proposal_pc.ply'%(idx_beg+i)))
if np.sum(objectness_mask[i,:])>0:
pc_util.write_ply(pred_center[i,objectness_mask[i,:]>0,0:3], os.path.join(dump_dir, '%06d_gt_mask_proposal_pc.ply'%(idx_beg+i)))
pc_util.write_ply(gt_center[i,:,0:3], os.path.join(dump_dir, '%06d_gt_centroid_pc.ply'%(idx_beg+i)))
pc_util.write_ply_color(pred_center[i,:,0:3], objectness_label[i,:], os.path.join(dump_dir, '%06d_proposal_pc_objectness_label.obj'%(idx_beg+i)))
# Dump GT bounding boxes
obbs = []
for j in range(gt_center.shape[1]):
if gt_mask[i,j] == 0: continue
obb = config.param2obb(gt_center[i,j,0:3], gt_heading_class[i,j], gt_heading_residual[i,j],
gt_size_class[i,j], gt_size_residual[i,j])
obbs.append(obb)
if len(obbs)>0:
obbs = np.vstack(tuple(obbs)) # (num_gt_objects, 7)
pc_util.write_oriented_bbox(obbs, os.path.join(dump_dir, '%06d_gt_bbox.ply'%(idx_beg+i)))
    # OPTIONALLY, also dump prediction and gt details
if 'batch_pred_map_cls' in end_points:
for ii in range(batch_size):
fout = open(os.path.join(dump_dir, '%06d_pred_map_cls.txt'%(ii)), 'w')
for t in end_points['batch_pred_map_cls'][ii]:
fout.write(str(t[0])+' ')
fout.write(",".join([str(x) for x in list(t[1].flatten())]))
fout.write(' '+str(t[2]))
fout.write('\n')
fout.close()
if 'batch_gt_map_cls' in end_points:
for ii in range(batch_size):
fout = open(os.path.join(dump_dir, '%06d_gt_map_cls.txt'%(ii)), 'w')
for t in end_points['batch_gt_map_cls'][ii]:
fout.write(str(t[0])+' ')
fout.write(",".join([str(x) for x in list(t[1].flatten())]))
fout.write('\n')
fout.close()
def dump_results_(end_points, dump_dir, config):
''' Dump results.
Args:
end_points: dict
{..., pred_mask}
pred_mask is a binary mask array of size (batch_size, num_proposal) computed by running NMS and empty box removal
Returns:
None
'''
if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)
# INPUT
point_clouds = end_points['point_clouds'].cpu().numpy()
batch_size = point_clouds.shape[0]
# NETWORK OUTPUTS
objectness_scores = end_points['objectness_scores'].detach().cpu().numpy() # (B,K,2)
pred_center = end_points['center'].detach().cpu().numpy() # (B,K,3)
pred_heading_class = torch.argmax(end_points['heading_scores'], -1) # B,num_proposal
pred_heading_residual = torch.gather(end_points['heading_residuals'], 2, pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_class = pred_heading_class.detach().cpu().numpy() # B,num_proposal
pred_heading_residual = pred_heading_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal
pred_size_class = torch.argmax(end_points['size_scores'], -1) # B,num_proposal
pred_size_residual = torch.gather(end_points['size_residuals'], 2, pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,3)) # B,num_proposal,1,3
pred_size_residual = pred_size_residual.squeeze(2).detach().cpu().numpy() # B,num_proposal,3
# OTHERS
pred_mask = end_points['pred_mask'] # B,num_proposal
pc = point_clouds[0,:,:]
objectness_prob = softmax(objectness_scores[0,:,:])[:,1] # (K,)
# Dump various point clouds
scan_idx = end_points['scan_idx']
scan_idx = str(scan_idx.cpu().numpy()[0])
os.makedirs(os.path.join(dump_dir, scan_idx))
pc_util.write_ply(pc, os.path.join(dump_dir, scan_idx, 'pc.ply'))
# Dump predicted bounding boxes
if np.sum(objectness_prob>DUMP_CONF_THRESH)>0:
num_proposal = pred_center.shape[1]
obbs = []
for j in range(num_proposal):
obb = config.param2obb(pred_center[0,j,0:3], pred_heading_class[0,j], pred_heading_residual[0,j],
pred_size_class[0,j], pred_size_residual[0,j])
obbs.append(obb)
if len(obbs)>0:
obbs = np.vstack(tuple(obbs)) # (num_proposal, 7)
obbs = obbs[np.logical_and(objectness_prob>DUMP_CONF_THRESH, pred_mask[0,:]==1),:]
for idx, obb in enumerate(obbs):
pc_util.write_oriented_bbox_(obb, os.path.join(dump_dir, scan_idx, '{}.ply'.format(idx)))
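# Illustrative sketch, not part of the original file: the numpy softmax above is used to turn
# the 2-way objectness logits into probabilities before thresholding with DUMP_CONF_THRESH.
# The logits below are made up.
if __name__ == '__main__':
    objectness_logits = np.array([[2.0, 0.5], [-1.0, 1.5], [0.0, 0.0]])  # (K, 2)
    objectness_prob = softmax(objectness_logits)[:, 1]                   # (K,)
    print('objectness probabilities:', objectness_prob)
    print('kept proposals:', np.where(objectness_prob > DUMP_CONF_THRESH)[0])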
| ContrastiveSceneContexts-main | downstream/votenet/models/dump_helper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from nn_distance import nn_distance, huber_loss
sys.path.append(BASE_DIR)
from loss_helper import compute_box_and_sem_cls_loss
OBJECTNESS_CLS_WEIGHTS = [0.2,0.8] # put larger weights on positive objectness
def compute_objectness_loss(end_points):
""" Compute objectness loss for the proposals.
Args:
end_points: dict (read-only)
Returns:
objectness_loss: scalar Tensor
objectness_label: (batch_size, num_seed) Tensor with value 0 or 1
objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1
object_assignment: (batch_size, num_seed) Tensor with long int
within [0,num_gt_object-1]
"""
# Associate proposal and GT objects by point-to-point distances
aggregated_vote_xyz = end_points['aggregated_vote_xyz']
gt_center = end_points['center_label'][:,:,0:3]
B = gt_center.shape[0]
K = aggregated_vote_xyz.shape[1]
K2 = gt_center.shape[1]
dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, gt_center) # dist1: BxK, dist2: BxK2
# Generate objectness label and mask
# NOTE: Different from VoteNet, here we use seed label as objectness label.
seed_inds = end_points['seed_inds'].long() # B,num_seed in [0,num_points-1]
seed_gt_votes_mask = torch.gather(end_points['vote_label_mask'], 1, seed_inds)
end_points['seed_labels'] = seed_gt_votes_mask
aggregated_vote_inds = end_points['aggregated_vote_inds']
objectness_label = torch.gather(end_points['seed_labels'], 1, aggregated_vote_inds.long()) # select (B,K) from (B,1024)
objectness_mask = torch.ones((objectness_label.shape[0], objectness_label.shape[1])).cuda() # no ignore zone anymore
# Compute objectness loss
objectness_scores = end_points['objectness_scores']
criterion = nn.CrossEntropyLoss(torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none')
objectness_loss = criterion(objectness_scores.transpose(2,1), objectness_label)
objectness_loss = torch.sum(objectness_loss * objectness_mask)/(torch.sum(objectness_mask)+1e-6)
# Set assignment
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
return objectness_loss, objectness_label, objectness_mask, object_assignment
def get_loss(end_points, config):
""" Loss functions
Args:
end_points: dict
{
seed_xyz, seed_inds,
center,
heading_scores, heading_residuals_normalized,
size_scores, size_residuals_normalized,
sem_cls_scores, #seed_logits,#
center_label,
heading_class_label, heading_residual_label,
size_class_label, size_residual_label,
sem_cls_label,
box_label_mask,
vote_label, vote_label_mask
}
config: dataset config instance
Returns:
loss: pytorch scalar tensor
end_points: dict
"""
# Obj loss
objectness_loss, objectness_label, objectness_mask, object_assignment = \
compute_objectness_loss(end_points)
end_points['objectness_loss'] = objectness_loss
end_points['objectness_label'] = objectness_label
end_points['objectness_mask'] = objectness_mask
end_points['object_assignment'] = object_assignment
total_num_proposal = objectness_label.shape[0]*objectness_label.shape[1]
end_points['pos_ratio'] = \
torch.sum(objectness_label.float().cuda())/float(total_num_proposal)
end_points['neg_ratio'] = \
torch.sum(objectness_mask.float())/float(total_num_proposal) - end_points['pos_ratio']
# Box loss and sem cls loss
center_loss, heading_cls_loss, heading_reg_loss, size_cls_loss, size_reg_loss, sem_cls_loss = \
compute_box_and_sem_cls_loss(end_points, config)
end_points['center_loss'] = center_loss
end_points['heading_cls_loss'] = heading_cls_loss
end_points['heading_reg_loss'] = heading_reg_loss
end_points['size_cls_loss'] = size_cls_loss
end_points['size_reg_loss'] = size_reg_loss
end_points['sem_cls_loss'] = sem_cls_loss
box_loss = center_loss + 0.1*heading_cls_loss + heading_reg_loss + 0.1*size_cls_loss + size_reg_loss
end_points['box_loss'] = box_loss
# Final loss function
loss = 0.5*objectness_loss + box_loss + 0.1*sem_cls_loss
loss *= 10
end_points['loss'] = loss
# --------------------------------------------
# Some other statistics
obj_pred_val = torch.argmax(end_points['objectness_scores'], 2) # B,K
obj_acc = torch.sum((obj_pred_val==objectness_label.long()).float()*objectness_mask)/(torch.sum(objectness_mask)+1e-6)
end_points['obj_acc'] = obj_acc
return loss, end_points
| ContrastiveSceneContexts-main | downstream/votenet/models/loss_helper_boxnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Helper functions and class to calculate Average Precisions for 3D object detection.
"""
import os
import sys
import numpy as np
import torch
from lib.utils.eval_det import eval_det_cls, eval_det_multiprocessing
from lib.utils.eval_det import get_iou_obb
from lib.utils.nms import nms_2d_faster, nms_3d_faster, nms_3d_faster_samecls
from lib.utils.box_util import get_3d_box
from datasets.sunrgbd.sunrgbd_utils import extract_pc_in_box3d
def flip_axis_back_camera(pc):
    ''' Flip X-right,Y-down,Z-forward (camera) back to X-right,Y-forward,Z-up (depth)
    Input and output are both (N,3) array
    '''
    pc2 = np.copy(pc)
    pc2[...,1] *= -1
    pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # depth X,Y,Z = cam X,Z,-Y
    return pc2
def flip_axis_to_camera(pc):
''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
'''
pc2 = np.copy(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
pc2[...,1] *= -1
return pc2
def flip_axis_to_depth(pc):
pc2 = np.copy(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # depth X,Y,Z = cam X,Z,-Y
pc2[...,2] *= -1
return pc2
def softmax(x):
''' Numpy function for softmax'''
shape = x.shape
probs = np.exp(x - np.max(x, axis=len(shape)-1, keepdims=True))
probs /= np.sum(probs, axis=len(shape)-1, keepdims=True)
return probs
def parse_predictions(end_points, config_dict):
""" Parse predictions to OBB parameters and suppress overlapping boxes
Args:
end_points: dict
{point_clouds, center, heading_scores, heading_residuals,
size_scores, size_residuals, sem_cls_scores}
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
pred_center = end_points['center'] # B,num_proposal,3
pred_heading_class = torch.argmax(end_points['heading_scores'], -1) # B,num_proposal
pred_heading_residual = torch.gather(end_points['heading_residuals'], 2,
pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_residual.squeeze_(2)
pred_size_class = torch.argmax(end_points['size_scores'], -1) # B,num_proposal
pred_size_residual = torch.gather(end_points['size_residuals'], 2,
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1,1,1,3)) # B,num_proposal,1,3
pred_size_residual.squeeze_(2)
pred_sem_cls = torch.argmax(end_points['sem_cls_scores'], -1) # B,num_proposal
sem_cls_probs = softmax(end_points['sem_cls_scores'].detach().cpu().numpy()) # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs,-1) # B,num_proposal
num_proposal = pred_center.shape[1]
# Since we operate in upright_depth coord for points, while util functions
# assume upright_camera coord.
bsize = pred_center.shape[0]
pred_corners_3d_upright_camera = np.zeros((bsize, num_proposal, 8, 3))
pred_center_upright_camera = flip_axis_to_camera(pred_center.detach().cpu().numpy())
for i in range(bsize):
for j in range(num_proposal):
heading_angle = config_dict['dataset_config'].class2angle(\
pred_heading_class[i,j].detach().cpu().numpy(), pred_heading_residual[i,j].detach().cpu().numpy())
box_size = config_dict['dataset_config'].class2size(\
int(pred_size_class[i,j].detach().cpu().numpy()), pred_size_residual[i,j].detach().cpu().numpy())
corners_3d_upright_camera = get_3d_box(box_size, heading_angle, pred_center_upright_camera[i,j,:])
pred_corners_3d_upright_camera[i,j] = corners_3d_upright_camera
K = pred_center.shape[1] # K==num_proposal
nonempty_box_mask = np.ones((bsize, K))
if config_dict['remove_empty_box']:
# -------------------------------------
# Remove predicted boxes without any point within them..
batch_pc = end_points['point_clouds'].cpu().numpy()[:,:,0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i,:,:] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i,j,:,:] # (8,3)
box3d = flip_axis_to_depth(box3d)
pc_in_box,inds = extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i,j] = 0
# -------------------------------------
obj_logits = end_points['objectness_scores'].detach().cpu().numpy()
obj_prob = softmax(obj_logits)[:,:,1] # (B,K)
if not config_dict['use_3d_nms']:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K,5))
for j in range(K):
boxes_2d_with_prob[j,0] = np.min(pred_corners_3d_upright_camera[i,j,:,0])
boxes_2d_with_prob[j,2] = np.max(pred_corners_3d_upright_camera[i,j,:,0])
boxes_2d_with_prob[j,1] = np.min(pred_corners_3d_upright_camera[i,j,:,2])
boxes_2d_with_prob[j,3] = np.max(pred_corners_3d_upright_camera[i,j,:,2])
boxes_2d_with_prob[j,4] = obj_prob[i,j]
nonempty_box_inds = np.where(nonempty_box_mask[i,:]==1)[0]
pick = nms_2d_faster(boxes_2d_with_prob[nonempty_box_mask[i,:]==1,:],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert(len(pick)>0)
pred_mask[i, nonempty_box_inds[pick]] = 1
end_points['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K,7))
for j in range(K):
boxes_3d_with_prob[j,0] = np.min(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,1] = np.min(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,2] = np.min(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,3] = np.max(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,4] = np.max(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,5] = np.max(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,6] = obj_prob[i,j]
nonempty_box_inds = np.where(nonempty_box_mask[i,:]==1)[0]
pick = nms_3d_faster(boxes_3d_with_prob[nonempty_box_mask[i,:]==1,:],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert(len(pick)>0)
pred_mask[i, nonempty_box_inds[pick]] = 1
end_points['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and config_dict['cls_nms']:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K,8))
for j in range(K):
boxes_3d_with_prob[j,0] = np.min(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,1] = np.min(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,2] = np.min(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,3] = np.max(pred_corners_3d_upright_camera[i,j,:,0])
boxes_3d_with_prob[j,4] = np.max(pred_corners_3d_upright_camera[i,j,:,1])
boxes_3d_with_prob[j,5] = np.max(pred_corners_3d_upright_camera[i,j,:,2])
boxes_3d_with_prob[j,6] = obj_prob[i,j]
boxes_3d_with_prob[j,7] = pred_sem_cls[i,j] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i,:]==1)[0]
pick = nms_3d_faster_samecls(boxes_3d_with_prob[nonempty_box_mask[i,:]==1,:],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert(len(pick)>0)
pred_mask[i, nonempty_box_inds[pick]] = 1
end_points['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
batch_pred_map_cls = [] # a list (len: batch_size) of list (len: num of predictions per sample) of tuples of pred_cls, pred_box and conf (0-1)
for i in range(bsize):
if config_dict['per_class_proposal']:
cur_list = []
for ii in range(config_dict['dataset_config'].num_class):
cur_list += [(ii, pred_corners_3d_upright_camera[i,j], sem_cls_probs[i,j,ii]*obj_prob[i,j]) \
for j in range(pred_center.shape[1]) if pred_mask[i,j]==1 and obj_prob[i,j]>config_dict['conf_thresh']]
batch_pred_map_cls.append(cur_list)
else:
batch_pred_map_cls.append([(pred_sem_cls[i,j].item(), pred_corners_3d_upright_camera[i,j], obj_prob[i,j]) \
for j in range(pred_center.shape[1]) if pred_mask[i,j]==1 and obj_prob[i,j]>config_dict['conf_thresh']])
end_points['batch_pred_map_cls'] = batch_pred_map_cls
return batch_pred_map_cls
def parse_groundtruths(end_points, config_dict):
""" Parse groundtruth labels to OBB parameters.
Args:
end_points: dict
{center_label, heading_class_label, heading_residual_label,
size_class_label, size_residual_label, sem_cls_label,
box_label_mask}
config_dict: dict
{dataset_config}
Returns:
batch_gt_map_cls: a list of len == batch_size (BS)
[gt_list_i], i = 0, 1, ..., BS-1
where gt_list_i = [(gt_sem_cls, gt_box_params)_j]
where j = 0, ..., num of objects - 1 at sample input i
"""
center_label = end_points['center_label']
heading_class_label = end_points['heading_class_label']
heading_residual_label = end_points['heading_residual_label']
size_class_label = end_points['size_class_label']
size_residual_label = end_points['size_residual_label']
box_label_mask = end_points['box_label_mask']
sem_cls_label = end_points['sem_cls_label']
bsize = center_label.shape[0]
K2 = center_label.shape[1] # K2==MAX_NUM_OBJ
gt_corners_3d_upright_camera = np.zeros((bsize, K2, 8, 3))
gt_center_upright_camera = flip_axis_to_camera(center_label[:,:,0:3].detach().cpu().numpy())
for i in range(bsize):
for j in range(K2):
if box_label_mask[i,j] == 0: continue
heading_angle = config_dict['dataset_config'].class2angle(heading_class_label[i,j].detach().cpu().numpy(), heading_residual_label[i,j].detach().cpu().numpy())
box_size = config_dict['dataset_config'].class2size(int(size_class_label[i,j].detach().cpu().numpy()), size_residual_label[i,j].detach().cpu().numpy())
corners_3d_upright_camera = get_3d_box(box_size, heading_angle, gt_center_upright_camera[i,j,:])
gt_corners_3d_upright_camera[i,j] = corners_3d_upright_camera
batch_gt_map_cls = []
for i in range(bsize):
batch_gt_map_cls.append([(sem_cls_label[i,j].item(), gt_corners_3d_upright_camera[i,j]) for j in range(gt_corners_3d_upright_camera.shape[1]) if box_label_mask[i,j]==1])
end_points['batch_gt_map_cls'] = batch_gt_map_cls
return batch_gt_map_cls
class APCalculator(object):
''' Calculating Average Precision '''
def __init__(self, ap_iou_thresh=0.25, class2type_map=None):
"""
Args:
ap_iou_thresh: float between 0 and 1.0
IoU threshold to judge whether a prediction is positive.
class2type_map: [optional] dict {class_int:class_name}
"""
self.ap_iou_thresh = ap_iou_thresh
self.class2type_map = class2type_map
self.reset()
def step(self, batch_pred_map_cls, batch_gt_map_cls):
""" Accumulate one batch of prediction and groundtruth.
Args:
batch_pred_map_cls: a list of lists [[(pred_cls, pred_box_params, score),...],...]
batch_gt_map_cls: a list of lists [[(gt_cls, gt_box_params),...],...]
should have the same length with batch_pred_map_cls (batch_size)
"""
bsize = len(batch_pred_map_cls)
assert(bsize == len(batch_gt_map_cls))
for i in range(bsize):
self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]
self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]
self.scan_cnt += 1
def compute_metrics(self):
""" Use accumulated predictions and groundtruths to compute Average Precision.
"""
rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=self.ap_iou_thresh, get_iou_func=get_iou_obb)
ret_dict = {}
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
ret_dict['%s Average Precision'%(clsname)] = ap[key]
ret_dict['mAP'] = np.mean(list(ap.values()))
rec_list = []
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
try:
ret_dict['%s Recall'%(clsname)] = rec[key][-1]
rec_list.append(rec[key][-1])
except:
ret_dict['%s Recall'%(clsname)] = 0
rec_list.append(0)
ret_dict['AR'] = np.mean(rec_list)
return ret_dict
def reset(self):
self.gt_map_cls = {} # {scan_id: [(classname, bbox)]}
self.pred_map_cls = {} # {scan_id: [(classname, bbox, score)]}
self.scan_cnt = 0
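# Illustrative sketch, not part of the original file, of the APCalculator API on a single toy
# scan with one class; the boxes are made-up axis-aligned cubes expressed as (8,3) corner
# arrays via get_3d_box. compute_metrics delegates the matching to eval_det_multiprocessing.
if __name__ == '__main__':
    gt_box = get_3d_box(np.array([1.0, 1.0, 1.0]), 0, np.array([0.0, 0.0, 0.0]))
    pred_box = get_3d_box(np.array([1.0, 1.0, 1.0]), 0, np.array([0.05, 0.0, 0.0]))
    batch_gt_map_cls = [[(0, gt_box)]]             # one scan, one GT object of class 0
    batch_pred_map_cls = [[(0, pred_box, 0.9)]]    # one prediction with confidence 0.9
    ap_calculator = APCalculator(ap_iou_thresh=0.25, class2type_map={0: 'chair'})
    ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
    for k, v in ap_calculator.compute_metrics().items():
        print(k, v)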
| ContrastiveSceneContexts-main | downstream/votenet/models/ap_helper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Voting module: generate votes from XYZ and features of seed points.
Date: July, 2019
Author: Charles R. Qi and Or Litany
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class VotingModule(nn.Module):
def __init__(self, vote_factor, seed_feature_dim):
""" Votes generation from seed point features.
Args:
            vote_factor: int
number of votes generated from each seed point
seed_feature_dim: int
number of channels of seed point features
vote_feature_dim: int
number of channels of vote features
"""
super().__init__()
self.vote_factor = vote_factor
self.in_dim = seed_feature_dim
self.out_dim = self.in_dim # due to residual feature, in_dim has to be == out_dim
self.conv1 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
self.conv2 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
self.conv3 = torch.nn.Conv1d(self.in_dim, (3+self.out_dim) * self.vote_factor, 1)
self.bn1 = torch.nn.BatchNorm1d(self.in_dim)
self.bn2 = torch.nn.BatchNorm1d(self.in_dim)
def forward(self, seed_xyz, seed_features):
""" Forward pass.
Arguments:
seed_xyz: (batch_size, num_seed, 3) Pytorch tensor
seed_features: (batch_size, feature_dim, num_seed) Pytorch tensor
Returns:
vote_xyz: (batch_size, num_seed*vote_factor, 3)
vote_features: (batch_size, vote_feature_dim, num_seed*vote_factor)
"""
batch_size = seed_xyz.shape[0]
num_seed = seed_xyz.shape[1]
num_vote = num_seed*self.vote_factor
net = F.relu(self.bn1(self.conv1(seed_features)))
net = F.relu(self.bn2(self.conv2(net)))
net = self.conv3(net) # (batch_size, (3+out_dim)*vote_factor, num_seed)
net = net.transpose(2,1).view(batch_size, num_seed, self.vote_factor, 3+self.out_dim)
offset = net[:,:,:,0:3]
vote_xyz = seed_xyz.unsqueeze(2) + offset
vote_xyz = vote_xyz.contiguous().view(batch_size, num_vote, 3)
residual_features = net[:,:,:,3:] # (batch_size, num_seed, vote_factor, out_dim)
vote_features = seed_features.transpose(2,1).unsqueeze(2) + residual_features
vote_features = vote_features.contiguous().view(batch_size, num_vote, self.out_dim)
vote_features = vote_features.transpose(2,1).contiguous()
return vote_xyz, vote_features
if __name__=='__main__':
net = VotingModule(2, 256).cuda()
xyz, features = net(torch.rand(8,1024,3).cuda(), torch.rand(8,256,1024).cuda())
print('xyz', xyz.shape)
print('features', features.shape)
| ContrastiveSceneContexts-main | downstream/votenet/models/voting_module.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'pointnet2'))
from pointnet2_modules import PointnetSAModuleVotes
import pointnet2_utils
def decode_scores(net, end_points, num_class, num_heading_bin, num_size_cluster, mean_size_arr):
net_transposed = net.transpose(2,1) # (batch_size, 1024, ..)
batch_size = net_transposed.shape[0]
num_proposal = net_transposed.shape[1]
objectness_scores = net_transposed[:,:,0:2]
end_points['objectness_scores'] = objectness_scores
base_xyz = end_points['aggregated_vote_xyz'] # (batch_size, num_proposal, 3)
center = base_xyz + net_transposed[:,:,2:5] # (batch_size, num_proposal, 3)
end_points['center'] = center
heading_scores = net_transposed[:,:,5:5+num_heading_bin]
heading_residuals_normalized = net_transposed[:,:,5+num_heading_bin:5+num_heading_bin*2]
end_points['heading_scores'] = heading_scores # Bxnum_proposalxnum_heading_bin
end_points['heading_residuals_normalized'] = heading_residuals_normalized # Bxnum_proposalxnum_heading_bin (should be -1 to 1)
end_points['heading_residuals'] = heading_residuals_normalized * (np.pi/num_heading_bin) # Bxnum_proposalxnum_heading_bin
size_scores = net_transposed[:,:,5+num_heading_bin*2:5+num_heading_bin*2+num_size_cluster]
size_residuals_normalized = net_transposed[:,:,5+num_heading_bin*2+num_size_cluster:5+num_heading_bin*2+num_size_cluster*4].view([batch_size, num_proposal, num_size_cluster, 3]) # Bxnum_proposalxnum_size_clusterx3
end_points['size_scores'] = size_scores
end_points['size_residuals_normalized'] = size_residuals_normalized
end_points['size_residuals'] = size_residuals_normalized * torch.from_numpy(mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0)
sem_cls_scores = net_transposed[:,:,5+num_heading_bin*2+num_size_cluster*4:] # Bxnum_proposalx10
end_points['sem_cls_scores'] = sem_cls_scores
return end_points
class ProposalModule(nn.Module):
def __init__(self, num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, sampling, seed_feat_dim=256):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.num_size_cluster = num_size_cluster
self.mean_size_arr = mean_size_arr
self.num_proposal = num_proposal
self.sampling = sampling
self.seed_feat_dim = seed_feat_dim
# Vote clustering
self.vote_aggregation = PointnetSAModuleVotes(
npoint=self.num_proposal,
radius=0.3,
nsample=16,
mlp=[self.seed_feat_dim, 128, 128, 128],
use_xyz=True,
normalize_xyz=True
)
# Object proposal/detection
# Objectness scores (2), center residual (3),
# heading class+residual (num_heading_bin*2), size class+residual(num_size_cluster*4)
self.conv1 = torch.nn.Conv1d(128,128,1)
self.conv2 = torch.nn.Conv1d(128,128,1)
self.conv3 = torch.nn.Conv1d(128,2+3+num_heading_bin*2+num_size_cluster*4+self.num_class,1)
self.bn1 = torch.nn.BatchNorm1d(128)
self.bn2 = torch.nn.BatchNorm1d(128)
def forward(self, xyz, features, end_points):
"""
Args:
xyz: (B,K,3)
features: (B,C,K)
Returns:
scores: (B,num_proposal,2+3+NH*2+NS*4)
"""
if self.sampling == 'vote_fps':
# Farthest point sampling (FPS) on votes
xyz, features, fps_inds = self.vote_aggregation(xyz, features)
sample_inds = fps_inds
elif self.sampling == 'seed_fps':
# FPS on seed and choose the votes corresponding to the seeds
# This gets us a slightly better coverage of *object* votes than vote_fps (which tends to get more cluster votes)
sample_inds = pointnet2_utils.furthest_point_sample(end_points['seed_xyz'], self.num_proposal)
xyz, features, _ = self.vote_aggregation(xyz, features, sample_inds)
elif self.sampling == 'random':
# Random sampling from the votes
num_seed = end_points['seed_xyz'].shape[1]
batch_size = end_points['seed_xyz'].shape[0]
sample_inds = torch.randint(0, num_seed, (batch_size, self.num_proposal), dtype=torch.int).cuda()
xyz, features, _ = self.vote_aggregation(xyz, features, sample_inds)
else:
            print('Unknown sampling strategy: %s. Exiting!'%(self.sampling))
exit()
end_points['aggregated_vote_xyz'] = xyz # (batch_size, num_proposal, 3)
end_points['aggregated_vote_inds'] = sample_inds # (batch_size, num_proposal,) # should be 0,1,2,...,num_proposal
# --------- PROPOSAL GENERATION ---------
net = F.relu(self.bn1(self.conv1(features)))
net = F.relu(self.bn2(self.conv2(net)))
net = self.conv3(net) # (batch_size, 2+3+num_heading_bin*2+num_size_cluster*4, num_proposal)
end_points = decode_scores(net, end_points, self.num_class, self.num_heading_bin, self.num_size_cluster, self.mean_size_arr)
return end_points
if __name__=='__main__':
sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
from sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, DC
net = ProposalModule(DC.num_class, DC.num_heading_bin,
DC.num_size_cluster, DC.mean_size_arr,
128, 'seed_fps').cuda()
end_points = {'seed_xyz': torch.rand(8,1024,3).cuda()}
out = net(torch.rand(8,1024,3).cuda(), torch.rand(8,256,1024).cuda(), end_points)
for key in out:
print(key, out[key].shape)
| ContrastiveSceneContexts-main | downstream/votenet/models/proposal_module.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
from backbone_module import Pointnet2Backbone
from proposal_module import ProposalModule
from dump_helper import dump_results
from loss_helper_boxnet import get_loss
class BoxNet(nn.Module):
r"""
A deep neural network for 3D object detection with end-to-end optimizable hough voting.
Parameters
----------
num_class: int
Number of semantics classes to predict over -- size of softmax classifier
num_heading_bin: int
num_size_cluster: int
input_feature_dim: (default: 0)
Input dim in the feature descriptor for each point. If the point cloud is Nx9, this
value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors
num_proposal: int (default: 128)
Number of proposals/detections generated from the network. Each proposal is a 3D OBB with a semantic class.
vote_factor: (default: 1)
Number of votes generated from each seed point.
"""
def __init__(self, num_class, num_heading_bin, num_size_cluster, mean_size_arr,
input_feature_dim=0, num_proposal=128, vote_factor=1, sampling='vote_fps', backbone=None):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.num_size_cluster = num_size_cluster
self.mean_size_arr = mean_size_arr
assert(mean_size_arr.shape[0] == self.num_size_cluster)
self.input_feature_dim = input_feature_dim
self.num_proposal = num_proposal
self.vote_factor = vote_factor
self.sampling=sampling
# Backbone point feature learning
self.backbone_net = Pointnet2Backbone(input_feature_dim=self.input_feature_dim)
# Box proposal, aggregation and detection
self.pnet = ProposalModule(num_class, num_heading_bin, num_size_cluster,
mean_size_arr, num_proposal, sampling)
def forward(self, inputs):
""" Forward pass of the network
Args:
inputs: dict
{point_clouds}
point_clouds: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
Returns:
end_points: dict
"""
end_points = {}
batch_size = inputs['point_clouds'].shape[0]
end_points = self.backbone_net(inputs['point_clouds'], end_points)
xyz = end_points['fp2_xyz']
features = end_points['fp2_features']
end_points['seed_inds'] = end_points['fp2_inds']
end_points['seed_xyz'] = xyz
end_points['seed_features'] = features
# Directly predict bounding boxes (skips voting)
end_points = self.pnet(xyz, features, end_points)
return end_points
if __name__=='__main__':
sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
from sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, DC
# Define dataset
TRAIN_DATASET = SunrgbdDetectionVotesDataset('train', num_points=20000, use_v1=True)
# Define model
model = BoxNet(10,12,10,np.random.random((10,3))).cuda()
# Model forward pass
sample = TRAIN_DATASET[5]
inputs = {'point_clouds': torch.from_numpy(sample['point_clouds']).unsqueeze(0).cuda()}
end_points = model(inputs)
for key in end_points:
print(key, end_points[key])
# Compute loss
for key in sample:
end_points[key] = torch.from_numpy(sample[key]).unsqueeze(0).cuda()
loss, end_points = get_loss(end_points, DC)
print('loss', loss)
end_points['point_clouds'] = inputs['point_clouds']
end_points['pred_mask'] = np.ones((1,128))
dump_results(end_points, 'tmp', DC)
| ContrastiveSceneContexts-main | downstream/votenet/models/boxnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
from models.backbone.pointnet2.pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule
from models.backbone.pointnet2.pointnet2_utils import furthest_point_sample
from models.backbone.sparseconv.config import get_config
from models.backbone.sparseconv.models_sparseconv import load_model
import MinkowskiEngine as ME
class Pointnet2Backbone(nn.Module):
r"""
Backbone network for point cloud feature learning.
Based on Pointnet++ single-scale grouping network.
Parameters
----------
input_feature_dim: int
Number of input channels in the feature descriptor for each point.
e.g. 3 for RGB.
"""
def __init__(self, input_feature_dim=0):
super().__init__()
self.sa1 = PointnetSAModuleVotes(
npoint=2048,
radius=0.2,
nsample=64,
mlp=[input_feature_dim, 64, 64, 128],
use_xyz=True,
normalize_xyz=True
)
self.sa2 = PointnetSAModuleVotes(
npoint=1024,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa3 = PointnetSAModuleVotes(
npoint=512,
radius=0.8,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa4 = PointnetSAModuleVotes(
npoint=256,
radius=1.2,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.fp1 = PointnetFPModule(mlp=[256+256,256,256])
self.fp2 = PointnetFPModule(mlp=[256+256,256,256])
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor, end_points=None):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_feature_dim) tensor
Point cloud to run predictions on
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
Returns
----------
end_points: {XXX_xyz, XXX_features, XXX_inds}
XXX_xyz: float32 Tensor of shape (B,K,3)
XXX_features: float32 Tensor of shape (B,K,D)
XXX_inds: int64 Tensor of shape (B,K) with values in [0,N-1]
"""
if not end_points: end_points = {}
batch_size = pointcloud.shape[0]
xyz, features = self._break_up_pc(pointcloud)
# --------- 4 SET ABSTRACTION LAYERS ---------
xyz, features, fps_inds = self.sa1(xyz, features)
end_points['sa1_inds'] = fps_inds
end_points['sa1_xyz'] = xyz
end_points['sa1_features'] = features
xyz, features, fps_inds = self.sa2(xyz, features) # this fps_inds is just 0,1,...,1023
end_points['sa2_inds'] = fps_inds
end_points['sa2_xyz'] = xyz
end_points['sa2_features'] = features
xyz, features, fps_inds = self.sa3(xyz, features) # this fps_inds is just 0,1,...,511
end_points['sa3_xyz'] = xyz
end_points['sa3_features'] = features
xyz, features, fps_inds = self.sa4(xyz, features) # this fps_inds is just 0,1,...,255
end_points['sa4_xyz'] = xyz
end_points['sa4_features'] = features
# --------- 2 FEATURE UPSAMPLING LAYERS --------
features = self.fp1(end_points['sa3_xyz'], end_points['sa4_xyz'], end_points['sa3_features'], end_points['sa4_features'])
features = self.fp2(end_points['sa2_xyz'], end_points['sa3_xyz'], end_points['sa2_features'], features)
end_points['fp2_features'] = features
end_points['fp2_xyz'] = end_points['sa2_xyz']
num_seed = end_points['fp2_xyz'].shape[1]
end_points['fp2_inds'] = end_points['sa1_inds'][:,0:num_seed] # indices among the entire input point clouds
return end_points
class SparseConvBackbone(nn.Module):
def __init__(self,
input_feature_dim=3,
output_feature_dim=256,
num_seed=1024,
model='Res16UNet34C',
config=None):
super().__init__()
config = get_config(["--conv1_kernel_size", "3", "--model", model])
# from pdb import set_trace; set_trace()
self.net = load_model(model)(
input_feature_dim, output_feature_dim, config)
self.num_seed = num_seed
def forward(self, points, coords, feats, inds, end_points=None):
inputs = ME.SparseTensor(feats.cpu(), coords=coords.cpu().int()).to(coords.device)
outputs = self.net(inputs)
features = outputs.F
# randomly down-sample to num_seed points & create batches
bsz, num_points, _ = points.size()
points = points.view(-1, 3)
batch_ids = coords[:, 0]
voxel_ids = inds + batch_ids * num_points
sampled_inds, sampled_features, sampled_points = [], [], []
for b in range(bsz):
sampled_id = furthest_point_sample(
points[voxel_ids[batch_ids == b]].unsqueeze(0),
self.num_seed).squeeze(0).long()
sampled_inds.append(inds[batch_ids == b][sampled_id])
sampled_features.append(features[batch_ids == b][sampled_id])
sampled_points.append(points[voxel_ids[batch_ids == b]][sampled_id])
end_points['fp2_features'] = torch.stack(sampled_features, 0).transpose(1, 2)
end_points['fp2_xyz'] = torch.stack(sampled_points, 0)
end_points['fp2_inds'] = torch.stack(sampled_inds, 0)
# from pdb import set_trace; set_trace()
return end_points
if __name__=='__main__':
backbone_net = Pointnet2Backbone(input_feature_dim=3).cuda()
print(backbone_net)
backbone_net.eval()
out = backbone_net(torch.rand(16,20000,6).cuda())
for key in sorted(out.keys()):
print(key, '\t', out[key].shape)
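# SparseConvBackbone is not exercised in the demo above because it consumes voxelized
# inputs rather than raw point clouds. A minimal sketch (assuming a batch dict built with
# models.backbone.sparseconv.voxelized_dataset.collate_fn, which provides the
# 'voxel_coords', 'voxel_feats' and 'voxel_inds' fields used below; `batch` is hypothetical):
#
# backbone = SparseConvBackbone(input_feature_dim=3, output_feature_dim=256, num_seed=1024).cuda()
# end_points = backbone(batch['point_clouds'].cuda(), batch['voxel_coords'].cuda(),
#                       batch['voxel_feats'].cuda(), batch['voxel_inds'].cuda(), end_points={})
# # end_points['fp2_xyz'] is (B, num_seed, 3); end_points['fp2_features'] is (B, 256, num_seed).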
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone_module.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Deep hough voting network for 3D object detection in point clouds.
Author: Charles R. Qi and Or Litany
"""
import torch
import torch.nn as nn
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
from backbone_module import Pointnet2Backbone, SparseConvBackbone
from voting_module import VotingModule
from proposal_module import ProposalModule
from dump_helper import dump_results
from loss_helper import get_loss
class VoteNet(nn.Module):
r"""
A deep neural network for 3D object detection with end-to-end optimizable hough voting.
Parameters
----------
num_class: int
Number of semantic classes to predict over -- size of the softmax classifier
num_heading_bin: int
num_size_cluster: int
input_feature_dim: (default: 0)
Input dim in the feature descriptor for each point. If the point cloud is Nx9, this
value should be 6, since in an Nx9 point cloud 3 of the channels are xyz coordinates and the remaining 6 are feature descriptors
num_proposal: int (default: 128)
Number of proposals/detections generated from the network. Each proposal is a 3D OBB with a semantic class.
vote_factor: (default: 1)
Number of votes generated from each seed point.
"""
def __init__(self, num_class, num_heading_bin, num_size_cluster, mean_size_arr,
input_feature_dim=0, num_proposal=128, vote_factor=1, sampling='vote_fps',
backbone='pointnet2'):
super().__init__()
self.num_class = num_class
self.num_heading_bin = num_heading_bin
self.num_size_cluster = num_size_cluster
self.mean_size_arr = mean_size_arr
assert(mean_size_arr.shape[0] == self.num_size_cluster)
self.input_feature_dim = input_feature_dim
self.num_proposal = num_proposal
self.vote_factor = vote_factor
self.sampling=sampling
self.backbone = backbone
# Backbone point feature learning
if backbone == 'pointnet2':
self.backbone_net = Pointnet2Backbone(input_feature_dim=self.input_feature_dim)
else:
self.backbone_net = SparseConvBackbone(
input_feature_dim=self.input_feature_dim + 3,
output_feature_dim=256,
num_seed=1024)
# from pdb import set_trace; set_trace()
# Hough voting
self.vgen = VotingModule(self.vote_factor, 256)
# Vote aggregation and detection
self.pnet = ProposalModule(num_class, num_heading_bin, num_size_cluster,
mean_size_arr, num_proposal, sampling)
def forward(self, inputs):
""" Forward pass of the network
Args:
inputs: dict
{point_clouds}
point_clouds: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predictions on
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
Returns:
end_points: dict
"""
end_points = {}
batch_size = inputs['point_clouds'].shape[0]
if self.backbone == 'pointnet2':
end_points = self.backbone_net(
inputs['point_clouds'], end_points)
else:
end_points = self.backbone_net(
inputs['point_clouds'],
inputs['voxel_coords'],
inputs['voxel_feats'],
inputs['voxel_inds'],
end_points)
# from pdb import set_trace; set_trace()
# --------- HOUGH VOTING ---------
xyz = end_points['fp2_xyz']
features = end_points['fp2_features']
end_points['seed_inds'] = end_points['fp2_inds']
end_points['seed_xyz'] = xyz
end_points['seed_features'] = features
xyz, features = self.vgen(xyz, features)
features_norm = torch.norm(features, p=2, dim=1)
features = features.div(features_norm.unsqueeze(1))
end_points['vote_xyz'] = xyz
end_points['vote_features'] = features
end_points = self.pnet(xyz, features, end_points)
return end_points
if __name__=='__main__':
sys.path.append(os.path.join(ROOT_DIR, 'sunrgbd'))
from sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, DC
from loss_helper import get_loss
# Define model
model = VoteNet(10,12,10,np.random.random((10,3))).cuda()
try:
# Define dataset
TRAIN_DATASET = SunrgbdDetectionVotesDataset('train', num_points=20000, use_v1=True)
# Model forward pass
sample = TRAIN_DATASET[5]
inputs = {'point_clouds': torch.from_numpy(sample['point_clouds']).unsqueeze(0).cuda()}
except Exception:
print('Dataset has not been prepared. Use a random sample.')
inputs = {'point_clouds': torch.rand((20000,3)).unsqueeze(0).cuda()}
end_points = model(inputs)
for key in end_points:
print(key, end_points[key])
try:
# Compute loss
for key in sample:
end_points[key] = torch.from_numpy(sample[key]).unsqueeze(0).cuda()
loss, end_points = get_loss(end_points, DC)
print('loss', loss)
end_points['point_clouds'] = inputs['point_clouds']
end_points['pred_mask'] = np.ones((1,128))
dump_results(end_points, 'tmp', DC)
except Exception:
print('Dataset has not been prepared. Skip loss and dump.')
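# Note: the demo above uses the default PointNet++ backbone. With any other value of
# `backbone` (the sparse-convolution path), the inputs dict must additionally provide
# 'voxel_coords', 'voxel_feats' and 'voxel_inds', e.g. as produced by
# models.backbone.sparseconv.voxelized_dataset.collate_fn. A sketch (`batch` is hypothetical):
#
# model = VoteNet(10, 12, 10, np.random.random((10, 3)), input_feature_dim=0, backbone='sparseconv').cuda()
# end_points = model({'point_clouds': batch['point_clouds'].cuda(),
#                     'voxel_coords': batch['voxel_coords'].cuda(),
#                     'voxel_feats': batch['voxel_feats'].cuda(),
#                     'voxel_inds': batch['voxel_inds'].cuda()})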
| ContrastiveSceneContexts-main | downstream/votenet/models/votenet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
def str2opt(arg):
assert arg in ['SGD', 'Adam']
return arg
def str2scheduler(arg):
assert arg in ['StepLR', 'PolyLR', 'ExpLR', 'SquaredLR']
return arg
def str2bool(v):
return v.lower() in ('true', '1')
def str2list(l):
return [int(i) for i in l.split(',')]
def add_argument_group(name):
arg = parser.add_argument_group(name)
arg_lists.append(arg)
return arg
arg_lists = []
parser = argparse.ArgumentParser()
# Network
net_arg = add_argument_group('Network')
net_arg.add_argument('--model', type=str,
default='ResUNet14', help='Model name')
net_arg.add_argument(
'--conv1_kernel_size', type=int, default=3, help='First layer conv kernel size')
net_arg.add_argument('--weights', type=str, default='None',
help='Saved weights to load')
net_arg.add_argument(
'--weights_for_inner_model',
type=str2bool,
default=False,
help='Weights for model inside a wrapper')
net_arg.add_argument(
'--dilations', type=str2list, default='1,1,1,1', help='Dilations used for ResNet or DenseNet')
# Wrappers
net_arg.add_argument('--wrapper_type', default='None',
type=str, help='Wrapper on the network')
net_arg.add_argument(
'--wrapper_region_type',
default=1,
type=int,
help='Wrapper connection types 0: hypercube, 1: hypercross, (default: 1)')
net_arg.add_argument('--wrapper_kernel_size', default=3,
type=int, help='Wrapper kernel size')
net_arg.add_argument(
'--wrapper_lr',
default=1e-1,
type=float,
help='Used for freezing or using small lr for the base model, freeze if negative')
# Meanfield arguments
net_arg.add_argument(
'--meanfield_iterations', type=int, default=10, help='Number of meanfield iterations')
net_arg.add_argument('--crf_spatial_sigma', default=1,
type=int, help='Trilateral spatial sigma')
net_arg.add_argument(
'--crf_chromatic_sigma', default=12, type=int, help='Trilateral chromatic sigma')
# Optimizer arguments
opt_arg = add_argument_group('Optimizer')
opt_arg.add_argument('--optimizer', type=str, default='SGD')
opt_arg.add_argument('--lr', type=float, default=1e-2)
opt_arg.add_argument('--sgd_momentum', type=float, default=0.9)
opt_arg.add_argument('--sgd_dampening', type=float, default=0.1)
opt_arg.add_argument('--adam_beta1', type=float, default=0.9)
opt_arg.add_argument('--adam_beta2', type=float, default=0.999)
opt_arg.add_argument('--weight_decay', type=float, default=1e-4)
opt_arg.add_argument('--param_histogram_freq', type=int, default=100)
opt_arg.add_argument('--save_param_histogram', type=str2bool, default=False)
opt_arg.add_argument('--iter_size', type=int, default=1,
help='accumulate gradient')
opt_arg.add_argument('--bn_momentum', type=float, default=0.02)
# Scheduler
opt_arg.add_argument('--scheduler', type=str2scheduler, default='StepLR')
opt_arg.add_argument('--max_iter', type=int, default=6e4)
opt_arg.add_argument('--step_size', type=int, default=2e4)
opt_arg.add_argument('--step_gamma', type=float, default=0.1)
opt_arg.add_argument('--poly_power', type=float, default=0.9)
opt_arg.add_argument('--exp_gamma', type=float, default=0.95)
opt_arg.add_argument('--exp_step_size', type=float, default=445)
# Directories
dir_arg = add_argument_group('Directories')
dir_arg.add_argument('--log_dir', type=str, default='outputs/default')
dir_arg.add_argument('--data_dir', type=str, default='data')
# Data
data_arg = add_argument_group('Data')
data_arg.add_argument('--dataset', type=str,
default='ScannetVoxelization2cmDataset')
data_arg.add_argument('--temporal_dilation', type=int, default=30)
data_arg.add_argument('--temporal_numseq', type=int, default=3)
data_arg.add_argument('--point_lim', type=int, default=-1)
data_arg.add_argument('--pre_point_lim', type=int, default=-1)
data_arg.add_argument('--batch_size', type=int, default=16)
data_arg.add_argument('--val_batch_size', type=int, default=1)
data_arg.add_argument('--test_batch_size', type=int, default=1)
data_arg.add_argument('--cache_data', type=str2bool, default=False)
data_arg.add_argument(
'--num_workers', type=int, default=1, help='num workers for train/test dataloader')
data_arg.add_argument('--num_val_workers', type=int,
default=1, help='num workers for val dataloader')
data_arg.add_argument('--ignore_label', type=int, default=255)
data_arg.add_argument('--return_transformation', type=str2bool, default=False)
data_arg.add_argument('--ignore_duplicate_class', type=str2bool, default=False)
data_arg.add_argument('--partial_crop', type=float, default=0.)
data_arg.add_argument('--train_limit_numpoints', type=int, default=0)
# Point Cloud Dataset
data_arg.add_argument(
'--synthia_path',
type=str,
default='/home/chrischoy/datasets/Synthia/Synthia4D',
help='Point Cloud dataset root dir')
# For temporal sequences
data_arg.add_argument(
'--synthia_camera_path', type=str, default='/home/chrischoy/datasets/Synthia/%s/CameraParams/')
data_arg.add_argument('--synthia_camera_intrinsic_file',
type=str, default='intrinsics.txt')
data_arg.add_argument(
'--synthia_camera_extrinsics_file', type=str, default='Stereo_Right/Omni_F/%s.txt')
data_arg.add_argument('--temporal_rand_dilation', type=str2bool, default=False)
data_arg.add_argument('--temporal_rand_numseq', type=str2bool, default=False)
data_arg.add_argument(
'--scannet_path',
type=str,
default='/home/chrischoy/datasets/scannet/scannet_preprocessed',
help='Scannet online voxelization dataset root dir')
data_arg.add_argument(
'--stanford3d_path',
type=str,
default='/home/chrischoy/datasets/Stanford3D',
help='Stanford precropped dataset root dir')
# Training / test parameters
train_arg = add_argument_group('Training')
train_arg.add_argument('--is_train', type=str2bool, default=True)
train_arg.add_argument('--stat_freq', type=int,
default=40, help='print frequency')
train_arg.add_argument('--test_stat_freq', type=int,
default=100, help='print frequency')
train_arg.add_argument('--save_freq', type=int,
default=1000, help='save frequency')
train_arg.add_argument('--val_freq', type=int,
default=1000, help='validation frequency')
train_arg.add_argument(
'--empty_cache_freq', type=int, default=1, help='Clear pytorch cache frequency')
train_arg.add_argument('--train_phase', type=str,
default='train', help='Dataset for training')
train_arg.add_argument('--val_phase', type=str,
default='val', help='Dataset for validation')
train_arg.add_argument(
'--overwrite_weights', type=str2bool, default=True, help='Overwrite checkpoint during training')
train_arg.add_argument(
'--resume', default=None, type=str, help='path to latest checkpoint (default: none)')
train_arg.add_argument(
'--resume_optimizer',
default=True,
type=str2bool,
help='Use checkpoint optimizer states when resume training')
train_arg.add_argument('--eval_upsample', type=str2bool, default=False)
train_arg.add_argument(
'--lenient_weight_loading',
type=str2bool,
default=False,
help='Weights with the same size will be loaded')
# Distributed Training configurations
ddp_arg = add_argument_group('Distributed')
ddp_arg.add_argument('--distributed-world-size', type=int, metavar='N',
default=max(1, torch.cuda.device_count()),
help='total number of GPUs across all nodes (default: all visible GPUs)')
ddp_arg.add_argument('--distributed-rank', default=0, type=int,
help='rank of the current worker')
ddp_arg.add_argument('--distributed-backend', default='nccl', type=str,
help='distributed backend')
ddp_arg.add_argument('--distributed-init-method', default=None, type=str,
help='typically tcp://hostname:port that will be used to '
'establish initial connection')
ddp_arg.add_argument('--distributed-port', default=-1, type=int,
help='port number (not required if using --distributed-init-method)')
ddp_arg.add_argument('--device-id', '--local_rank', default=0, type=int,
help='which GPU to use (usually configured automatically)')
ddp_arg.add_argument('--distributed-no-spawn', action='store_true',
help='do not spawn multiple processes even if multiple GPUs are visible')
ddp_arg.add_argument('--ddp-backend', default='c10d', type=str,
choices=['c10d', 'no_c10d'],
help='DistributedDataParallel backend')
ddp_arg.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
help='bucket size for reduction')
# Data augmentation
data_aug_arg = add_argument_group('DataAugmentation')
data_aug_arg.add_argument(
'--use_feat_aug', type=str2bool, default=True, help='Simple feat augmentation')
data_aug_arg.add_argument(
'--data_aug_color_trans_ratio', type=float, default=0.10, help='Color translation range')
data_aug_arg.add_argument(
'--data_aug_color_jitter_std', type=float, default=0.05, help='STD of color jitter')
data_aug_arg.add_argument('--normalize_color', type=str2bool, default=True)
data_aug_arg.add_argument('--data_aug_scale_min', type=float, default=0.9)
data_aug_arg.add_argument('--data_aug_scale_max', type=float, default=1.1)
data_aug_arg.add_argument(
'--data_aug_hue_max', type=float, default=0.5, help='Hue translation range. [0, 1]')
data_aug_arg.add_argument(
'--data_aug_saturation_max',
type=float,
default=0.20,
help='Saturation translation range, [0, 1]')
# Test
test_arg = add_argument_group('Test')
test_arg.add_argument('--visualize', type=str2bool, default=False)
test_arg.add_argument('--test_temporal_average', type=str2bool, default=False)
test_arg.add_argument('--visualize_path', type=str,
default='outputs/visualize')
test_arg.add_argument('--save_prediction', type=str2bool, default=False)
test_arg.add_argument('--save_pred_dir', type=str, default='outputs/pred')
test_arg.add_argument('--test_phase', type=str,
default='test', help='Dataset for test')
test_arg.add_argument(
'--evaluate_original_pointcloud',
type=str2bool,
default=False,
help='Test on the original pointcloud space during network evaluation using voxel projection.')
test_arg.add_argument(
'--test_original_pointcloud',
type=str2bool,
default=False,
help='Test on the original pointcloud space as given by the dataset using kd-tree.')
# Misc
misc_arg = add_argument_group('Misc')
misc_arg.add_argument('--is_cuda', type=str2bool, default=True)
misc_arg.add_argument('--load_path', type=str, default='')
misc_arg.add_argument('--log_step', type=int, default=50)
misc_arg.add_argument('--log_level', type=str,
default='INFO', choices=['INFO', 'DEBUG', 'WARN'])
misc_arg.add_argument('--num_gpu', type=str2bool, default=1)
misc_arg.add_argument('--seed', type=int, default=123)
def get_config(args=None):
config = parser.parse_args(args=args)
return config
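# Example usage (a minimal sketch): the parser can also be driven programmatically by
# passing an explicit argument list instead of reading sys.argv, which is how the
# sparse-convolution backbone builds its config elsewhere in this repo:
#
# config = get_config(["--conv1_kernel_size", "3", "--model", "Res16UNet34C"])
# print(config.model, config.conv1_kernel_size, config.bn_momentum)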
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/config.py |
# coding: utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data._utils.collate import default_collate
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import MinkowskiEngine as ME
class VoxelizationDataset(Dataset):
"""
Wrapper dataset which voxelizes the original point clouds
"""
def __init__(self, dataset, voxel_size=0.05):
self.dataset = dataset
self.VOXEL_SIZE = voxel_size
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
ret_dict = self.dataset[idx]
# voxelization
coords = np.floor(ret_dict['point_clouds'] / self.VOXEL_SIZE)
inds = ME.utils.sparse_quantize(coords, return_index=True)
coords = coords[inds].astype(np.int32)
colors = ret_dict['pcl_color'][inds]
ret_dict['voxel'] = (coords, np.array(inds, dtype=np.int32), colors)
return ret_dict
def collate_fn(samples):
data, voxel = [], []
for sample in samples:
data.append({w: sample[w] for w in sample if w != 'voxel'})
voxel.append(sample['voxel'])
# for non-voxel data, use default collate
data_batch = default_collate(data)
batch_ids = np.array(
[b for b, v in enumerate(voxel) for _ in range(v[0].shape[0])])
voxel_ids = np.concatenate([v[1] for v in voxel], 0)
coords = np.concatenate([v[0] for v in voxel], 0)
coords = np.concatenate([batch_ids[:, None], coords], 1)
colors = np.concatenate([v[2] for v in voxel], 0)
data_batch['voxel_coords'] = torch.from_numpy(coords)
data_batch['voxel_inds'] = torch.from_numpy(voxel_ids)
#data_batch['voxel_feats'] = data_batch['point_clouds'].new_ones(batch_ids.shape[0], 3)
data_batch['voxel_feats'] = torch.from_numpy(colors).float()
return data_batch
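# Example wiring (a minimal sketch; `detection_dataset` is a hypothetical dataset that yields
# dicts containing 'point_clouds' and 'pcl_color' arrays, as the VoteNet detection datasets do):
#
# from torch.utils.data import DataLoader
#
# voxel_dataset = VoxelizationDataset(detection_dataset, voxel_size=0.05)
# loader = DataLoader(voxel_dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
# batch = next(iter(loader))
# # batch holds the default-collated fields plus 'voxel_coords' (N, 4) with a leading batch
# # index column, 'voxel_inds' (N,) and 'voxel_feats' (the per-voxel colors).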
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/voxelized_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree. | ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/__init__.py |
import collections.abc
import numpy as np
import MinkowskiEngine as ME
from scipy.linalg import expm, norm
# Rotation matrix along axis with angle theta
def M(axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
class Voxelizer:
def __init__(self,
voxel_size=1,
clip_bound=None,
use_augmentation=False,
scale_augmentation_bound=None,
rotation_augmentation_bound=None,
translation_augmentation_ratio_bound=None,
ignore_label=255):
"""
Args:
voxel_size: side length of a voxel
clip_bound: boundary of the voxelizer. Points outside the bound will be deleted
expects either None or an array like ((-100, 100), (-100, 100), (-100, 100)).
scale_augmentation_bound: None or (0.9, 1.1)
rotation_augmentation_bound: None or ((np.pi / 6, np.pi / 6), None, None) for 3 axis.
Use random order of x, y, z to prevent bias.
translation_augmentation_ratio_bound: None or ((-5, 5), (0, 0), (-10, 10))
ignore_label: label assigned for ignore (not a training label).
"""
self.voxel_size = voxel_size
self.clip_bound = clip_bound
self.ignore_label = ignore_label
# Augmentation
self.use_augmentation = use_augmentation
self.scale_augmentation_bound = scale_augmentation_bound
self.rotation_augmentation_bound = rotation_augmentation_bound
self.translation_augmentation_ratio_bound = translation_augmentation_ratio_bound
def get_transformation_matrix(self):
voxelization_matrix, rotation_matrix = np.eye(4), np.eye(4)
# Get clip boundary from config or pointcloud.
# Get inner clip bound to crop from.
# Transform pointcloud coordinate to voxel coordinate.
# 1. Random rotation
rot_mat = np.eye(3)
if self.use_augmentation and self.rotation_augmentation_bound is not None:
if isinstance(self.rotation_augmentation_bound, collections.abc.Iterable):
rot_mats = []
for axis_ind, rot_bound in enumerate(self.rotation_augmentation_bound):
theta = 0
axis = np.zeros(3)
axis[axis_ind] = 1
if rot_bound is not None:
theta = np.random.uniform(*rot_bound)
rot_mats.append(M(axis, theta))
# Use random order
np.random.shuffle(rot_mats)
rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2]
else:
raise ValueError()
rotation_matrix[:3, :3] = rot_mat
# 2. Scale and translate to the voxel space.
scale = 1 / self.voxel_size
if self.use_augmentation and self.scale_augmentation_bound is not None:
scale *= np.random.uniform(*self.scale_augmentation_bound)
np.fill_diagonal(voxelization_matrix[:3, :3], scale)
# Get final transformation matrix.
return voxelization_matrix, rotation_matrix
def clip(self, coords, center=None, trans_aug_ratio=None):
bound_min = np.min(coords, 0).astype(float)
bound_max = np.max(coords, 0).astype(float)
bound_size = bound_max - bound_min
if center is None:
center = bound_min + bound_size * 0.5
if trans_aug_ratio is not None:
trans = np.multiply(trans_aug_ratio, bound_size)
center += trans
lim = self.clip_bound
if isinstance(self.clip_bound, (int, float)):
if bound_size.max() < self.clip_bound:
return None
else:
clip_inds = ((coords[:, 0] >= (-lim + center[0])) &
(coords[:, 0] < (lim + center[0])) &
(coords[:, 1] >= (-lim + center[1])) &
(coords[:, 1] < (lim + center[1])) &
(coords[:, 2] >= (-lim + center[2])) &
(coords[:, 2] < (lim + center[2])))
return clip_inds
# Clip points outside the limit
clip_inds = ((coords[:, 0] >= (lim[0][0] + center[0])) &
(coords[:, 0] < (lim[0][1] + center[0])) &
(coords[:, 1] >= (lim[1][0] + center[1])) &
(coords[:, 1] < (lim[1][1] + center[1])) &
(coords[:, 2] >= (lim[2][0] + center[2])) &
(coords[:, 2] < (lim[2][1] + center[2])))
return clip_inds
def voxelize(self, coords, feats, labels, center=None):
assert coords.shape[1] == 3 and coords.shape[0] == feats.shape[0] and coords.shape[0]
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(
*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
homo_coords = np.hstack(
(coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T[:, :3])
# Align all coordinates to the origin.
min_coords = coords_aug.min(0)
M_t = np.eye(4)
M_t[:3, -1] = -min_coords
rigid_transformation = M_t @ rigid_transformation
coords_aug = np.floor(coords_aug - min_coords)
# key = self.hash(coords_aug) # floor happens by astype(np.uint64)
coords_aug, feats, labels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, ignore_label=self.ignore_label)
return coords_aug, feats, labels, rigid_transformation.flatten()
def voxelize_temporal(self,
coords_t,
feats_t,
labels_t,
centers=None,
return_transformation=False):
# Legacy code, remove
if centers is None:
centers = [
None,
] * len(coords_t)
coords_tc, feats_tc, labels_tc, transformation_tc = [], [], [], []
# ######################### Data Augmentation #############################
# Get rotation and scale
M_v, M_r = self.get_transformation_matrix()
# Apply transformations
rigid_transformation = M_v
if self.use_augmentation:
rigid_transformation = M_r @ rigid_transformation
# ######################### Voxelization #############################
# Voxelize coords
for coords, feats, labels, center in zip(coords_t, feats_t, labels_t, centers):
###################################
# Clip the data if bound exists
if self.clip_bound is not None:
trans_aug_ratio = np.zeros(3)
if self.use_augmentation and self.translation_augmentation_ratio_bound is not None:
for axis_ind, trans_ratio_bound in enumerate(self.translation_augmentation_ratio_bound):
trans_aug_ratio[axis_ind] = np.random.uniform(
*trans_ratio_bound)
clip_inds = self.clip(coords, center, trans_aug_ratio)
if clip_inds is not None:
coords, feats = coords[clip_inds], feats[clip_inds]
if labels is not None:
labels = labels[clip_inds]
###################################
homo_coords = np.hstack(
(coords, np.ones((coords.shape[0], 1), dtype=coords.dtype)))
coords_aug = np.floor(homo_coords @ rigid_transformation.T)[:, :3]
coords_aug, feats, labels = ME.utils.sparse_quantize(
coords_aug, feats, labels=labels, ignore_label=self.ignore_label)
coords_tc.append(coords_aug)
feats_tc.append(feats)
labels_tc.append(labels)
transformation_tc.append(rigid_transformation.flatten())
return_args = [coords_tc, feats_tc, labels_tc]
if return_transformation:
return_args.append(transformation_tc)
return tuple(return_args)
def test():
N = 16575
coords = np.random.rand(N, 3) * 10
feats = np.random.rand(N, 4)
labels = np.floor(np.random.rand(N) * 3)
coords[:3] = 0
labels[:3] = 2
voxelizer = Voxelizer()
print(voxelizer.voxelize(coords, feats, labels))
if __name__ == '__main__':
test()
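# Typical construction with augmentation enabled (a sketch; the specific bounds below are
# illustrative choices, not values mandated by this module):
#
# voxelizer = Voxelizer(
#     voxel_size=0.05,
#     clip_bound=None,
#     use_augmentation=True,
#     scale_augmentation_bound=(0.9, 1.1),
#     rotation_augmentation_bound=((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi, np.pi)),
#     translation_augmentation_ratio_bound=((-0.2, 0.2), (-0.2, 0.2), (0, 0)),
#     ignore_label=255)
# coords, feats, labels, transform = voxelizer.voxelize(coords, feats, labels)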
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/voxelizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from torch.nn import Module
from MinkowskiEngine import SparseTensor
class Wrapper(Module):
"""
Wrapper for the segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, NetClass, in_nchannel, out_nchannel, config):
super(Wrapper, self).__init__()
self.initialize_filter(NetClass, in_nchannel, out_nchannel, config)
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
raise NotImplementedError('Must initialize a model and a filter')
def forward(self, x, coords, colors=None):
soutput = self.model(x)
# During training, make the network invariant to the filter
if not self.training or random.random() < 0.5:
# Filter requires the model to finish the forward pass
wrapper_coords = self.filter.initialize_coords(self.model, coords, colors)
finput = SparseTensor(soutput.F, wrapper_coords)
soutput = self.filter(finput)
return soutput
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/wrapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.backbone.sparseconv.models_sparseconv.resnet import ResNetBase, get_norm
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, conv, conv_tr
from models.backbone.sparseconv.models_sparseconv.modules.resnet_block import BasicBlock, BasicBlockINBN, Bottleneck
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
stride=1,
dilation=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, stride=1, dilation=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1)
return self.final(out)
class ResUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1)
class ResUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2)
class ResUNet18INBN(ResUNet18):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class ResUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3, 2, 2)
class ResUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3, 2, 2)
class ResUNet14D(ResUNet14):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet18D(ResUNet18):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34D(ResUNet34):
PLANES = (64, 128, 256, 512, 512, 512, 512)
class ResUNet34E(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 64)
class ResUNet34F(ResUNet34):
INIT_DIM = 32
PLANES = (32, 64, 128, 256, 128, 64, 32)
class MinkUNetHyper(MinkUNetBase):
BLOCK = None
PLANES = (64, 128, 256, 512, 256, 128, 128)
DILATIONS = (1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2)
INIT_DIM = 64
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv1p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.PLANES[0], D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr4 = ME.MinkowskiPoolingTranspose(kernel_size=8, stride=8, dimension=D)
out_pool4 = self.inplanes
self.convtr4p8s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr5 = ME.MinkowskiPoolingTranspose(kernel_size=4, stride=4, dimension=D)
out_pool5 = self.inplanes
self.convtr5p4s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.pool_tr6 = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D)
out_pool6 = self.inplanes
self.convtr6p2s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = nn.Sequential(
conv(
out_pool5 + out_pool6 + self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion,
512,
kernel_size=1,
bias=False,
D=D), ME.MinkowskiBatchNorm(512), ME.MinkowskiReLU(),
conv(512, out_channels, kernel_size=1, bias=True, D=D))
def forward(self, x):
out = self.conv1p1s1(x)
out = self.bn1(out)
out = self.relu(out)
out_b1p1 = self.block1(out)
out = self.conv2p1s2(out_b1p1)
out = self.bn2(out)
out = self.relu(out)
out_b2p2 = self.block2(out)
out = self.conv3p2s2(out_b2p2)
out = self.bn3(out)
out = self.relu(out)
out_b3p4 = self.block3(out)
out = self.conv4p4s2(out_b3p4)
out = self.bn4(out)
out = self.relu(out)
# pixel_dist=8
out = self.block4(out)
out = self.convtr4p8s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p4)
out = self.block5(out)
out_5 = self.pool_tr5(out)
out = self.convtr5p4s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p2)
out = self.block6(out)
out_6 = self.pool_tr6(out)
out = self.convtr6p2s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p1, out_6, out_5)
return self.final(out)
class MinkUNetHyper14INBN(MinkUNetHyper):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
BLOCK = BasicBlockINBN
class STMinkUNetBase(MinkUNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STMinkUNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResUNet14(STMinkUNetBase, ResUNet14):
pass
class STResUNet18(STMinkUNetBase, ResUNet18):
pass
class STResUNet34(STMinkUNetBase, ResUNet34):
pass
class STResUNet50(STMinkUNetBase, ResUNet50):
pass
class STResUNet101(STMinkUNetBase, ResUNet101):
pass
class STResTesseractUNetBase(STMinkUNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractUNet14(STResTesseractUNetBase, ResUNet14):
pass
class STResTesseractUNet18(STResTesseractUNetBase, ResUNet18):
pass
class STResTesseractUNet34(STResTesseractUNetBase, ResUNet34):
pass
class STResTesseractUNet50(STResTesseractUNetBase, ResUNet50):
pass
class STResTesseractUNet101(STResTesseractUNetBase, ResUNet101):
pass
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/resunet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.backbone.sparseconv.models_sparseconv import resunet as resunet
from models.backbone.sparseconv.models_sparseconv import res16unet as res16unet
# from models.trilateral_crf import TrilateralCRF
from models.backbone.sparseconv.models_sparseconv.conditional_random_fields import BilateralCRF, TrilateralCRF
MODELS = []
def add_models(module):
MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a])
add_models(resunet)
add_models(res16unet)
WRAPPERS = [BilateralCRF, TrilateralCRF]
def get_models():
'''Returns a tuple of sample models.'''
return MODELS
def get_wrappers():
return WRAPPERS
def load_model(name):
'''Finds and returns the model class given its name.
'''
# Find the model class from its name
all_models = get_models()
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
print('Invalid model name. Options are:')
# Display a list of valid model names
for model in all_models:
print('\t* {}'.format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
def load_wrapper(name):
'''Finds and returns the wrapper class given its name.
'''
# Find the model class from its name
all_wrappers = get_wrappers()
mdict = {wrapper.__name__: wrapper for wrapper in all_wrappers}
if name not in mdict:
print('Invalid wrapper name. Options are:')
# Display a list of valid model names
for wrapper in all_wrappers:
print('\t* {}'.format(wrapper.__name__))
return None
WrapperClass = mdict[name]
return WrapperClass
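# Example (a minimal sketch): load_model returns the class itself, not an instance, so the
# caller instantiates it with (in_channels, out_channels, config), as SparseConvBackbone does:
#
# from models.backbone.sparseconv.config import get_config
#
# NetClass = load_model('Res16UNet34C')
# net = NetClass(3, 256, get_config(["--conv1_kernel_size", "3"]))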
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.backbone.sparseconv.models_sparseconv.resnet import ResNetBase, get_norm
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, conv, conv_tr
from models.backbone.sparseconv.models_sparseconv.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling initialize_coords
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
# Setup net_metadata
dilations = self.DILATIONS
bn_momentum = config.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
# Output of the first conv concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
self.relu = MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# pixel_dist=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# pixel_dist=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
# pixel_dist=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
# pixel_dist=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
# pixel_dist=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
return self.final(out)
class Res16UNet14(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet50(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet101(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class Res16UNet14A(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet14A2(Res16UNet14A):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet14B2(Res16UNet14B):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B3(Res16UNet14B):
LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)
class Res16UNet14C(Res16UNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class Res16UNet14D(Res16UNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet18B(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet18D(Res16UNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet34A(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class Res16UNet34B(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class STRes16UNetBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STRes16UNet14(STRes16UNetBase, Res16UNet14):
pass
class STRes16UNet14A(STRes16UNetBase, Res16UNet14A):
pass
class STRes16UNet18(STRes16UNetBase, Res16UNet18):
pass
class STRes16UNet34(STRes16UNetBase, Res16UNet34):
pass
class STRes16UNet50(STRes16UNetBase, Res16UNet50):
pass
class STRes16UNet101(STRes16UNetBase, Res16UNet101):
pass
class STRes16UNet18A(STRes16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class STResTesseract16UNetBase(STRes16UNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
pass
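# Example forward pass (a minimal sketch, assuming the MinkowskiEngine SparseTensor API used
# elsewhere in this repo, with coordinates laid out as (batch_idx, x, y, z)):
#
# import MinkowskiEngine as ME
# from models.backbone.sparseconv.config import get_config
#
# net = Res16UNet34C(in_channels=3, out_channels=256, config=get_config(["--conv1_kernel_size", "3"]))
# # coords: (N, 4) int tensor with duplicates already removed (e.g. via ME.utils.sparse_quantize);
# # feats: (N, 3) float tensor of per-voxel features such as colors.
# out = net(ME.SparseTensor(feats, coords=coords))
# # out.F holds one 256-dim feature per input voxel.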
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/res16unet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from MinkowskiEngine import MinkowskiNetwork
class Model(MinkowskiNetwork):
"""
Base network for all sparse convnet
By default, all networks are segmentation networks.
"""
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class HighDimensionalModel(Model):
"""
Base network for all spatio (temporal) chromatic sparse convnet
"""
def __init__(self, in_channels, out_channels, config, D, **kwargs):
assert D > 4, "Number of dimensions must be greater than 4"
super(HighDimensionalModel, self).__init__(in_channels, out_channels, config, D, **kwargs)
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import MinkowskiEngine as ME
from models.backbone.sparseconv.models_sparseconv.model import Model
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, get_norm, conv, sum_pool
from models.backbone.sparseconv.models_sparseconv.modules.resnet_block import BasicBlock, Bottleneck
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D)
self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1))
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1))
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1))
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1))
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D),
get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
class ResNet14(ResNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1)
class ResNet18(ResNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2)
class ResNet34(ResNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3)
class ResNet50(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3)
class ResNet101(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3)
class STResNetBase(ResNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STResNet14(STResNetBase, ResNet14):
pass
class STResNet18(STResNetBase, ResNet18):
pass
class STResNet34(STResNetBase, ResNet34):
pass
class STResNet50(STResNetBase, ResNet50):
pass
class STResNet101(STResNetBase, ResNet101):
pass
class STResTesseractNetBase(STResNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseractNet14(STResTesseractNetBase, STResNet14):
pass
class STResTesseractNet18(STResTesseractNetBase, STResNet18):
pass
class STResTesseractNet34(STResTesseractNetBase, STResNet34):
pass
class STResTesseractNet50(STResTesseractNetBase, STResNet50):
pass
class STResTesseractNet101(STResTesseractNetBase, STResNet101):
pass
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/resnet.py |
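The ResNet constructors above read only dilations, bn_momentum and conv1_kernel_size from the config object, so a SimpleNamespace is enough for a quick parameter-count check. This is a hedged usage sketch, assuming the repository is on PYTHONPATH and MinkowskiEngine is installed; the field values are illustrative.

from types import SimpleNamespace
from models.backbone.sparseconv.models_sparseconv.resnet import ResNet14

# Minimal config carrying only the fields network_initialization reads.
config = SimpleNamespace(dilations=(1, 1, 1, 1), bn_momentum=0.05, conv1_kernel_size=3)
net = ResNet14(in_channels=3, out_channels=20, config=config, D=3)
print(sum(p.numel() for p in net.parameters()), 'parameters')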
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.autograd import Variable
from MinkowskiEngine import SparseTensor, MinkowskiConvolution, MinkowskiConvolutionFunction, convert_to_int_tensor
from MinkowskiEngine import convert_region_type as me_convert_region_type
from models.backbone.sparseconv.lib.math_functions import SparseMM
from models.backbone.sparseconv.models_sparseconv.model import HighDimensionalModel
from models.backbone.sparseconv.models_sparseconv.wrapper import Wrapper
from models.backbone.sparseconv.models_sparseconv.modules.common import convert_region_type
class MeanField(HighDimensionalModel):
"""
Abstract class for the bilateral and trilateral meanfield
"""
OUT_PIXEL_DIST = 1
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling
# initialize_coords
def __init__(self, nchannels, spatial_sigma, chromatic_sigma, meanfield_iterations, is_temporal,
config, **kwargs):
D = 7 if is_temporal else 6
self.is_temporal = is_temporal
# Setup metadata
super(MeanField, self).__init__(nchannels, nchannels, config, D=D)
self.spatial_sigma = spatial_sigma
self.chromatic_sigma = chromatic_sigma
# temporal sigma is 1
self.meanfield_iterations = meanfield_iterations
self.pixel_dist = 1
self.stride = 1
self.dilation = 1
conv = MinkowskiConvolution(
nchannels,
nchannels,
kernel_size=config.wrapper_kernel_size,
has_bias=False,
region_type=convert_region_type(config.wrapper_region_type),
dimension=D)
# Create a region_offset
self.region_type_, self.region_offset_, _ = me_convert_region_type(
conv.region_type, 1, conv.kernel_size, conv.up_stride, conv.dilation, conv.region_offset,
conv.axis_types, conv.dimension)
# Check whether the mapping is required
self.requires_mapping = False
self.conv = conv
self.kernel = conv.kernel
self.convs = {}
self.softmaxes = {}
for i in range(self.meanfield_iterations):
self.softmaxes[i] = nn.Softmax(dim=1)
self.convs[i] = MinkowskiConvolutionFunction()
def initialize_coords(self, model, in_coords, in_color):
if torch.prod(convert_to_int_tensor(model.OUT_PIXEL_DIST, model.D)) != 1:
self.requires_mapping = True
out_coords = model.get_coords(model.OUT_PIXEL_DIST)
out_color = model.permute_feature(in_color, model.OUT_PIXEL_DIST).int()
# Tri/Bi-lateral grid
out_tri_coords = torch.cat(
[
(torch.floor(out_coords[:, :3].float() / self.spatial_sigma)).int(),
(torch.floor(out_color.float() / self.chromatic_sigma)).int(),
out_coords[:, 3:] # (time and) batch
],
dim=1)
orig_tri_coords = torch.cat(
[
(torch.floor(in_coords[:, :3].float() / self.spatial_sigma)).int(),
(torch.floor(in_color.float() / self.chromatic_sigma)).int(),
in_coords[:, 3:] # (time and) batch
],
dim=1)
crf_tri_coords = torch.cat((out_tri_coords, orig_tri_coords), dim=0)
# Create a trilateral Grid
# super(MeanField, self).initialize_coords_with_duplicates(crf_tri_coords)
# Create Sparse matrix mappings to/from the CRF coords
in_cols = self.get_index_map(out_tri_coords, 1)
self.in_mapping = torch.sparse.FloatTensor(
torch.stack((in_cols.long(), torch.arange(in_cols.size(0), out=torch.LongTensor()))),
torch.ones(in_cols.size(0)), torch.Size((self.n_rows, in_cols.size(0))))
out_cols = self.get_index_map(orig_tri_coords, 1)
self.out_mapping = torch.sparse.FloatTensor(
torch.stack((torch.arange(out_cols.size(0), out=torch.LongTensor()), out_cols.long())),
torch.ones(out_cols.size(0)), torch.Size((out_cols.size(0), self.n_rows)))
if self.config.is_cuda:
self.in_mapping, self.out_mapping = self.in_mapping.cuda(), self.out_mapping.cuda()
else:
self.requires_mapping = False
out_coords = in_coords
out_color = in_color
crf_tri_coords = torch.cat(
[
(torch.floor(in_coords[:, :3].float() / self.spatial_sigma)).int(),
(torch.floor(in_color.float() / self.chromatic_sigma)).int(),
in_coords[:, 3:], # (time and) batch
],
dim=1)
return crf_tri_coords
def forward(self, x):
xf = x.F
if self.requires_mapping:
# Map the network output to CRF input
xf = SparseMM()(Variable(self.in_mapping), xf)
out = xf
for i in range(self.meanfield_iterations): # Meanfield iteration
# Normalization
out = self.softmaxes[i](out)
# Pairwise potential
out = self.convs[i].apply(out, self.conv.kernel, x.pixel_dist, self.conv.stride,
self.conv.kernel_size, self.conv.dilation, self.region_type_,
self.region_offset_, x.coords_key, x.coords_key, x.C)
# Add unary
out += xf
if self.requires_mapping:
      # Map the CRF output to the original space
out = SparseMM()(Variable(self.out_mapping), out)
return SparseTensor(out, coords_key=x.coords_key, coords_manager=x.C)
class BilateralCRF(Wrapper):
OUT_PIXEL_DIST = 1
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
self.model = NetClass(in_nchannel, out_nchannel, config)
self.filter = MeanField(
out_nchannel,
spatial_sigma=config.crf_spatial_sigma,
chromatic_sigma=config.crf_chromatic_sigma,
meanfield_iterations=config.meanfield_iterations,
is_temporal=False,
config=config)
class TrilateralCRF(Wrapper):
OUT_PIXEL_DIST = 1
def initialize_filter(self, NetClass, in_nchannel, out_nchannel, config):
self.model = NetClass(in_nchannel, out_nchannel, config)
self.filter = MeanField(
out_nchannel,
spatial_sigma=config.crf_spatial_sigma,
chromatic_sigma=config.crf_chromatic_sigma,
meanfield_iterations=config.meanfield_iterations,
is_temporal=True,
config=config)
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/conditional_random_fields.py |
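The core of initialize_coords is the bilateral-grid quantization: spatial coordinates are divided by spatial_sigma and colors by chromatic_sigma before flooring, so points close in both space and color land in the same grid cell. A self-contained NumPy sketch of that quantization (illustrative only, not the repository's API; the sigma values are assumptions):

import numpy as np

def bilateral_grid_coords(xyz, rgb, spatial_sigma=1.0, chromatic_sigma=12.0):
  # Quantize spatial and chromatic coordinates by their sigmas, as in
  # MeanField.initialize_coords, and concatenate into 6-D grid coordinates.
  spatial = np.floor(xyz / spatial_sigma).astype(np.int32)
  chromatic = np.floor(rgb / chromatic_sigma).astype(np.int32)
  return np.concatenate([spatial, chromatic], axis=1)

xyz = np.random.rand(5, 3) * 10.0
rgb = np.random.randint(0, 256, size=(5, 3)).astype(np.float64)
print(bilateral_grid_coords(xyz, rgb))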