python_code | repo_name | file_path
---|---|---|
import torch.nn as nn
from models.backbone.sparseconv.models_sparseconv.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BasicBlockIN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BasicBlockINBN(BasicBlockBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckIN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_NORM
class BottleneckINBN(BottleneckBase):
NORM_TYPE = NormType.INSTANCE_BATCH_NORM
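# Minimal usage sketch for the blocks above, assuming MinkowskiEngine (the 0.4.x
# API targeted by this repo) is installed and the repo root is on PYTHONPATH;
# shapes and values are illustrative.
if __name__ == '__main__':
  import torch
  import MinkowskiEngine as ME

  # Eight distinct voxels in a single batch item, 32 input channels.
  coords = ME.utils.batched_coordinates([torch.tensor([[i, 0, 0] for i in range(8)])])
  feats = torch.randn(8, 32)
  x = ME.SparseTensor(feats, coords)

  block = BasicBlock(inplanes=32, planes=32, D=3)  # stride 1, so the residual add stays on the same coordinate map
  out = block(x)
  print(out.F.shape)  # expected: torch.Size([8, 32])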
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/resnet_block.py |
import torch.nn as nn
import MinkowskiEngine as ME
from models.modules.common import ConvType, NormType
from models.modules.resnet_block import BasicBlock, Bottleneck
class SELayer(nn.Module):
def __init__(self, channel, reduction=16, D=-1):
# Global coords does not require coords_key
super(SELayer, self).__init__()
self.fc = nn.Sequential(
ME.MinkowskiLinear(channel, channel // reduction), ME.MinkowskiReLU(inplace=True),
ME.MinkowskiLinear(channel // reduction, channel), ME.MinkowskiSigmoid())
self.pooling = ME.MinkowskiGlobalPooling(dimension=D)
self.broadcast_mul = ME.MinkowskiBroadcastMultiplication(dimension=D)
def forward(self, x):
y = self.pooling(x)
y = self.fc(y)
return self.broadcast_mul(x, y)
class SEBasicBlock(BasicBlock):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
reduction=16,
D=-1):
super(SEBasicBlock, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBasicBlockSN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBasicBlockIN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBasicBlockLN(SEBasicBlock):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
class SEBottleneck(Bottleneck):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
D=3,
reduction=16):
super(SEBottleneck, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=conv_type,
D=D)
self.se = SELayer(planes * self.expansion, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneckSN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_SWITCH_NORM
class SEBottleneckIN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
class SEBottleneckLN(SEBottleneck):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
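# A dense-tensor sketch of what SELayer computes, for intuition only (plain PyTorch,
# no MinkowskiEngine needed): global-pool per sample, squeeze/excite through the
# bottleneck MLP, then rescale every point's features by the per-channel gates.
if __name__ == '__main__':
  import torch
  x = torch.randn(2, 64, 100)                  # (batch, channels, points)
  squeeze = x.mean(dim=2)                      # global pooling -> (2, 64)
  excite = nn.Sequential(
      nn.Linear(64, 64 // 16), nn.ReLU(inplace=True),
      nn.Linear(64 // 16, 64), nn.Sigmoid())
  gates = excite(squeeze)                      # per-channel gates in (0, 1)
  out = x * gates.unsqueeze(-1)                # broadcast multiply, same shape as x
  print(out.shape)                             # torch.Size([2, 64, 100])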
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/senet_block.py |
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/__init__.py |
import collections.abc
from enum import Enum
import torch.nn as nn
import MinkowskiEngine as ME
class NormType(Enum):
BATCH_NORM = 0
INSTANCE_NORM = 1
INSTANCE_BATCH_NORM = 2
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
if norm_type == NormType.BATCH_NORM:
return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
elif norm_type == NormType.INSTANCE_NORM:
return ME.MinkowskiInstanceNorm(n_channels)
elif norm_type == NormType.INSTANCE_BATCH_NORM:
return nn.Sequential(
ME.MinkowskiInstanceNorm(n_channels),
ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum))
else:
raise ValueError(f'Norm type: {norm_type} not supported')
class ConvType(Enum):
"""
Define the kernel region type
"""
HYPERCUBE = 0, 'HYPERCUBE'
SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE'
SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE'
HYPERCROSS = 3, 'HYPERCROSS'
SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS'
SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS'
SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS '
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
# Convert the ConvType var to a RegionType var
conv_to_region_type = {
# kernel_size = [k, k, k, 1]
ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID
}
int_to_region_type = {m.value: m for m in ME.RegionType}
def convert_region_type(region_type):
"""
Convert the integer region_type to the corresponding RegionType enum object.
"""
return int_to_region_type[region_type]
def convert_conv_type(conv_type, kernel_size, D):
assert isinstance(conv_type, ConvType), "conv_type must be of ConvType"
region_type = conv_to_region_type[conv_type]
axis_types = None
if conv_type == ConvType.SPATIAL_HYPERCUBE:
# No temporal convolution
    if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.HYPERCUBE:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIAL_HYPERCROSS:
    if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.HYPERCROSS:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS:
# Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim
axis_types = [
ME.RegionType.HYPERCUBE,
] * 3
if D == 4:
axis_types.append(ME.RegionType.HYPERCROSS)
return region_type, axis_types, kernel_size
def conv(in_planes,
out_planes,
kernel_size,
stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiConvolution(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def conv_tr(in_planes,
out_planes,
kernel_size,
upsample_stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
upsample_stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D)
return ME.MinkowskiConvolutionTranspose(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=upsample_stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def avg_pool(kernel_size,
stride=1,
dilation=1,
conv_type=ConvType.HYPERCUBE,
in_coords_key=None,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgUnpooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiSumPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
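# Minimal sketch of the conv()/conv_tr() helpers above, assuming MinkowskiEngine
# 0.4.x (which provides RegionType.HYPERCUBE/HYPERCROSS and the has_bias argument):
if __name__ == '__main__':
  layer = conv(in_planes=3, out_planes=16, kernel_size=3, stride=2,
               conv_type=ConvType.SPATIAL_HYPERCROSS, D=3)
  up = conv_tr(in_planes=16, out_planes=3, kernel_size=3, upsample_stride=2, D=3)
  print(layer)
  print(up)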
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/models_sparseconv/modules/common.py |
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/lib/__init__.py |
from scipy.sparse import csr_matrix
import torch
class SparseMM(torch.autograd.Function):
  """
  Sparse x dense matrix multiplication with autograd support.
  Implementation by Soumith Chintala:
  https://discuss.pytorch.org/t/
  does-pytorch-support-autograd-on-sparse-matrix/6156/7
  """

  # Uses the static-method autograd API (call as SparseMM.apply(matrix1, matrix2));
  # the legacy instance-method form errors out on recent PyTorch releases.
  @staticmethod
  def forward(ctx, matrix1, matrix2):
    ctx.save_for_backward(matrix1, matrix2)
    return torch.mm(matrix1, matrix2)

  @staticmethod
  def backward(ctx, grad_output):
    matrix1, matrix2 = ctx.saved_tensors
    grad_matrix1 = grad_matrix2 = None
    if ctx.needs_input_grad[0]:
      grad_matrix1 = torch.mm(grad_output, matrix2.t())
    if ctx.needs_input_grad[1]:
      grad_matrix2 = torch.mm(matrix1.t(), grad_output)
    return grad_matrix1, grad_matrix2
def sparse_float_tensor(values, indices, size=None):
"""
Return a torch sparse matrix give values and indices (row_ind, col_ind).
If the size is an integer, return a square matrix with side size.
If the size is a torch.Size, use it to initialize the out tensor.
If none, the size is inferred.
"""
  indices = torch.stack(indices).long()
sargs = [indices, values.float()]
if size is not None:
# Use the provided size
if isinstance(size, int):
size = torch.Size((size, size))
sargs.append(size)
if values.is_cuda:
return torch.cuda.sparse.FloatTensor(*sargs)
else:
return torch.sparse.FloatTensor(*sargs)
def diags(values, size=None):
values = values.view(-1)
n = values.nelement()
size = torch.Size((n, n))
indices = (torch.arange(0, n), torch.arange(0, n))
return sparse_float_tensor(values, indices, size)
def sparse_to_csr_matrix(tensor):
tensor = tensor.cpu()
inds = tensor._indices().numpy()
vals = tensor._values().numpy()
return csr_matrix((vals, (inds[0], inds[1])), shape=[s for s in tensor.shape])
def csr_matrix_to_sparse(mat):
row_ind, col_ind = mat.nonzero()
return sparse_float_tensor(
torch.from_numpy(mat.data),
(torch.from_numpy(row_ind), torch.from_numpy(col_ind)),
size=torch.Size(mat.shape))
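# Minimal usage sketch of the helpers above (CPU-only, plain torch + scipy);
# SparseMM is called through the static .apply interface.
if __name__ == '__main__':
  d = diags(torch.tensor([1.0, 2.0, 3.0]))                     # 3x3 sparse diagonal matrix
  back = csr_matrix_to_sparse(sparse_to_csr_matrix(d.coalesce()))
  dense = torch.randn(3, 4, requires_grad=True)
  out = SparseMM.apply(back, dense)                            # sparse x dense -> (3, 4)
  out.sum().backward()
  print(out.shape, dense.grad.shape)                           # (3, 4) and (3, 4)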
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/sparseconv/lib/math_functions.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch '''
import torch
import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact
)
)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name=""
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name
)
class Conv3d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv3d,
batch_norm=BatchNorm3d,
bias=bias,
preact=preact,
name=name
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
nn.init.constant_(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
def set_bn_momentum_default(bn_momentum):
def fn(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = bn_momentum
return fn
class BNMomentumScheduler(object):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
if not isinstance(model, nn.Module):
raise RuntimeError(
"Class '{}' is not a PyTorch nn Module".format(
type(model).__name__
)
)
self.model = model
self.setter = setter
self.lmbd = bn_lambda
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.model.apply(self.setter(self.lmbd(epoch)))
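# Minimal usage sketch for SharedMLP and BNMomentumScheduler (plain PyTorch,
# illustrative shapes and schedule):
if __name__ == '__main__':
    mlp = SharedMLP([6, 64, 128], bn=True)
    x = torch.randn(2, 6, 1024, 16)          # (B, C_in, npoint, nsample); 1x1 Conv2d layers under the hood
    print(mlp(x).shape)                      # torch.Size([2, 128, 1024, 16])

    sched = BNMomentumScheduler(mlp, bn_lambda=lambda e: max(0.5 * 0.5 ** (e // 20), 1e-3))
    sched.step(40)                           # every BatchNorm momentum becomes 0.5 * 0.5**2 = 0.125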
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/pointnet2/pytorch_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
this_dir = os.path.dirname(os.path.abspath(__file__))
_ext_src_root = "_ext_src"
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
setup(
name='pointnet2',
ext_modules=[
CUDAExtension(
name='pointnet2._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O3"],
"nvcc": ["-O3", "-Xfatbin", "-compress-all"],
},
include_dirs=[os.path.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={
'build_ext': BuildExtension
}
)
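# Typical build step (a common invocation, not prescribed by this file): run
# `pip install .` or `python setup.py install` from this directory; both require
# a CUDA toolkit compatible with the installed PyTorch build, since the sources
# include .cu files compiled through CUDAExtension.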
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/pointnet2/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of known features
known : torch.Tensor
(B, m, 3) tensor of unknown features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Performs weight linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
        self.use_xyz = use_xyz
        self.ret_grouped_xyz = ret_grouped_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
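# Minimal usage sketch, assuming the pointnet2._ext CUDA extension has been built
# (see setup.py) and a GPU is available; shapes are illustrative.
if __name__ == '__main__':
    xyz = torch.randn(2, 1024, 3).cuda()
    feats = torch.randn(2, 32, 1024).cuda()
    inds = furthest_point_sample(xyz, 128)                        # (2, 128) int indices
    new_xyz = gather_operation(xyz.transpose(1, 2).contiguous(), inds)
    new_xyz = new_xyz.transpose(1, 2).contiguous()                # (2, 128, 3) sampled centroids
    grouper = QueryAndGroup(radius=0.4, nsample=32, use_xyz=True)
    grouped = grouper(xyz, new_xyz, feats)                        # (2, 32 + 3, 128, 32)
    print(inds.shape, new_xyz.shape, grouped.shape)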
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/pointnet2/pointnet2_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Testing customized ops. '''
import torch
from torch.autograd import gradcheck
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
def test_interpolation_grad():
batch_size = 1
feat_dim = 2
m = 4
feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
def interpolate_func(inputs):
idx = torch.from_numpy(np.array([[[0,1,2],[1,2,3]]])).int().cuda()
weight = torch.from_numpy(np.array([[[1,1,1],[2,2,2]]])).float().cuda()
interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
return interpolated_feats
assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
if __name__=='__main__':
test_interpolation_grad()
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/pointnet2/pointnet2_test.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Pointnet2 layers.
Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
Extended with the following:
1. Uniform sampling in each local region (sample_uniformly)
2. Return sampled points indices to support votenet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
npoint: int,
radii: List[float],
nsamples: List[int],
mlps: List[List[int]],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True
):
super().__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz
)
class PointnetSAModuleVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True,
pooling: str = 'max',
sigma: float = None, # for RBF pooling
            normalize_xyz: bool = False, # normalize local XYZ with radius
sample_uniformly: bool = False,
ret_unique_cnt: bool = False
):
super().__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.pooling = pooling
self.mlp_module = None
self.use_xyz = use_xyz
self.sigma = sigma
if self.sigma is None:
self.sigma = self.radius/2
self.normalize_xyz = normalize_xyz
self.ret_unique_cnt = ret_unique_cnt
if npoint is not None:
self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz,
sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
else:
self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
mlp_spec = mlp
if use_xyz and len(mlp_spec)>0:
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None,
inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
else:
assert(inds.shape[1] == self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
if not self.ret_unique_cnt:
grouped_features, grouped_xyz = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample)
else:
grouped_features, grouped_xyz, unique_cnt = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
new_features = self.mlp_module(
grouped_features
) # (B, mlp[-1], npoint, nsample)
if self.pooling == 'max':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'avg':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'rbf':
# Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
# Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1,keepdim=False) / (self.sigma**2) / 2) # (B, npoint, nsample)
new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
if not self.ret_unique_cnt:
return new_xyz, new_features, inds
else:
return new_xyz, new_features, inds, unique_cnt
class PointnetSAModuleMSGVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlps: List[List[int]],
npoint: int,
radii: List[float],
nsamples: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None, inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, *, mlp: List[int], bn: bool = True):
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*known_feats.size()[0:2], unknown.size(1)
)
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats],
dim=1) #(B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
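# Usage note (illustrative, not from the original docstring): when unknow_feats is
# given, mlp[0] must equal C2 + C1, since the interpolated known features and the
# unknown features are concatenated along the channel dimension before the shared
# MLP; when unknow_feats is None, mlp[0] must equal C2.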
class PointnetLFPModuleMSG(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
learnable feature propagation layer.'''
def __init__(
self,
*,
mlps: List[List[int]],
radii: List[float],
nsamples: List[int],
post_mlp: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
sample_uniformly=sample_uniformly)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
r""" Propagate features from xyz1 to xyz2.
Parameters
----------
xyz2 : torch.Tensor
(B, N2, 3) tensor of the xyz coordinates of the features
xyz1 : torch.Tensor
(B, N1, 3) tensor of the xyz coordinates of the features
features2 : torch.Tensor
            (B, C2, N2) tensor of the descriptors of the features
        features1 : torch.Tensor
            (B, C1, N1) tensor of the descriptors of the features
Returns
-------
new_features1 : torch.Tensor
(B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz1, xyz2, features1
) # (B, C1, N2, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], N2, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], N2, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], N2)
if features2 is not None:
new_features = torch.cat([new_features, features2],
dim=1) #(B, mlp[-1] + C2, N2)
new_features = new_features.unsqueeze(-1)
new_features = self.post_mlp(new_features)
new_features_list.append(new_features)
return torch.cat(new_features_list, dim=1).squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(
torch.cuda.FloatTensor(*new_features.size()).fill_(1)
)
print(new_features)
print(xyz.grad)
| ContrastiveSceneContexts-main | downstream/votenet/models/backbone/pointnet2/pointnet2_modules.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import os
import sys
import logging
import numpy as np
import importlib
import warnings
import argparse
import torch.optim as optim
import torch.nn as nn
from datetime import datetime
from models.loss_helper import get_loss as criterion
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
warnings.simplefilter(action='ignore', category=FutureWarning)
from models.backbone.pointnet2.pytorch_utils import BNMomentumScheduler
from models.dump_helper import dump_results, dump_results_
from models.ap_helper import APCalculator, parse_predictions, parse_groundtruths
from omegaconf import OmegaConf
from torch.utils.data import DataLoader
from torch.serialization import default_restore_location
from lib.distributed import multi_proc_run, is_master_proc, get_world_size
class DetectionTrainer():
def __init__(self, config):
self.is_master = is_master_proc(get_world_size()) if get_world_size() > 1 else True
self.cur_device = torch.cuda.current_device()
# load the configurations
self.setup_logging()
if os.path.exists('config.yaml'):
            logging.info('===> Loading existing config file')
            config = OmegaConf.load('config.yaml')
            logging.info('===> Loaded existing config file')
logging.info('===> Configurations')
logging.info(config.pretty())
# Create Dataset and Dataloader
if config.data.dataset == 'sunrgbd':
from datasets.sunrgbd.sunrgbd_detection_dataset import SunrgbdDetectionVotesDataset, MAX_NUM_OBJ
from datasets.sunrgbd.model_util_sunrgbd import SunrgbdDatasetConfig
dataset_config = SunrgbdDatasetConfig()
train_dataset = SunrgbdDetectionVotesDataset('train',
num_points=config.data.num_points,
augment=True,
use_color=config.data.use_color,
use_height=(not config.data.no_height),
use_v1=(not config.data.use_sunrgbd_v2))
test_dataset = SunrgbdDetectionVotesDataset(config.test.phase,
num_points=config.data.num_points,
augment=False,
use_color=config.data.use_color,
use_height=(not config.data.no_height),
use_v1=(not config.data.use_sunrgbd_v2))
elif config.data.dataset == 'scannet':
from datasets.scannet.scannet_detection_dataset import ScannetDetectionDataset, MAX_NUM_OBJ
from datasets.scannet.model_util_scannet import ScannetDatasetConfig
dataset_config = ScannetDatasetConfig()
train_dataset = ScannetDetectionDataset('train',
num_points=config.data.num_points,
augment=True,
use_color=config.data.use_color,
use_height=(not config.data.no_height),
by_scenes=config.data.by_scenes,
by_points=config.data.by_points)
test_dataset = ScannetDetectionDataset(config.test.phase,
num_points=config.data.num_points,
augment=False,
use_color=config.data.use_color,
use_height=(not config.data.no_height))
else:
logging.info('Unknown dataset %s. Exiting...'%(config.data.dataset))
exit(-1)
COLLATE_FN = None
if config.data.voxelization:
from models.backbone.sparseconv.voxelized_dataset import VoxelizationDataset, collate_fn
train_dataset = VoxelizationDataset(train_dataset, config.data.voxel_size)
test_dataset = VoxelizationDataset(test_dataset, config.data.voxel_size)
COLLATE_FN = collate_fn
logging.info('training: {}, testing: {}'.format(len(train_dataset), len(test_dataset)))
self.sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if get_world_size() > 1 else None
train_dataloader = DataLoader(
train_dataset,
batch_size=config.data.batch_size // config.misc.num_gpus,
shuffle=(self.sampler is None),
sampler=self.sampler,
num_workers=config.data.num_workers,
collate_fn=COLLATE_FN)
test_dataloader = DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=1,
collate_fn=COLLATE_FN)
logging.info('train dataloader: {}, test dataloader: {}'.format(len(train_dataloader),len(test_dataloader)))
        # Init the model and optimizer
MODEL = importlib.import_module('models.' + config.net.model) # import network module
num_input_channel = int(config.data.use_color)*3 + int(not config.data.no_height)*1
if config.net.model == 'boxnet':
Detector = MODEL.BoxNet
else:
Detector = MODEL.VoteNet
net = Detector(num_class=dataset_config.num_class,
num_heading_bin=dataset_config.num_heading_bin,
num_size_cluster=dataset_config.num_size_cluster,
mean_size_arr=dataset_config.mean_size_arr,
num_proposal=config.net.num_target,
input_feature_dim=num_input_channel,
vote_factor=config.net.vote_factor,
sampling=config.net.cluster_sampling,
backbone=config.net.backbone)
if config.net.weights != '':
#assert config.net.backbone == "sparseconv", "only support sparseconv"
print('===> Loading weights: ' + config.net.weights)
state = torch.load(config.net.weights, map_location=lambda s, l: default_restore_location(s, 'cpu'))
model = net
if config.net.is_train:
model = net.backbone_net
if config.net.backbone == "sparseconv":
model = net.backbone_net.net
matched_weights = DetectionTrainer.load_state_with_same_shape(model, state['state_dict'])
model_dict = model.state_dict()
model_dict.update(matched_weights)
model.load_state_dict(model_dict)
net.to(self.cur_device)
if get_world_size() > 1:
net = torch.nn.parallel.DistributedDataParallel(
module=net, device_ids=[self.cur_device], output_device=self.cur_device, broadcast_buffers=False)
# Load the Adam optimizer
self.optimizer = optim.Adam(net.parameters(), lr=config.optimizer.learning_rate, weight_decay=config.optimizer.weight_decay)
# writer
if self.is_master:
self.writer = SummaryWriter(log_dir='tensorboard')
self.config = config
self.dataset_config = dataset_config
self.net = net
self.train_dataloader = train_dataloader
self.test_dataloader = test_dataloader
self.best_mAP = -1
# Used for AP calculation
self.CONFIG_DICT = {'remove_empty_box':False, 'use_3d_nms':True,
'nms_iou':0.25, 'use_old_type_nms':False, 'cls_nms':True,
'per_class_proposal': True, 'conf_thresh':0.05, 'dataset_config': dataset_config}
# Used for AP calculation
self.CONFIG_DICT_TEST = {'remove_empty_box': (not config.test.faster_eval),
'use_3d_nms': config.test.use_3d_nms,
'nms_iou': config.test.nms_iou,
'use_old_type_nms': config.test.use_old_type_nms,
'cls_nms': config.test.use_cls_nms,
'per_class_proposal': config.test.per_class_proposal,
'conf_thresh': config.test.conf_thresh,
'dataset_config': dataset_config}
# Load checkpoint if there is any
self.start_epoch = 0
CHECKPOINT_PATH = os.path.join('checkpoint.tar')
if os.path.isfile(CHECKPOINT_PATH):
checkpoint = torch.load(CHECKPOINT_PATH)
if get_world_size() > 1:
_model = self.net.module
else:
_model = self.net
_model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.start_epoch = checkpoint['epoch']
self.best_mAP = checkpoint['best_mAP']
logging.info("-> loaded checkpoint %s (epoch: %d)"%(CHECKPOINT_PATH, self.start_epoch))
# Decay Batchnorm momentum from 0.5 to 0.999
# note: pytorch's BN momentum (default 0.1)= 1 - tensorflow's BN momentum
BN_MOMENTUM_INIT = 0.5
BN_MOMENTUM_MAX = 0.001
BN_DECAY_STEP = config.optimizer.bn_decay_step
BN_DECAY_RATE = config.optimizer.bn_decay_rate
bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * BN_DECAY_RATE**(int(it / BN_DECAY_STEP)), BN_MOMENTUM_MAX)
self.bnm_scheduler = BNMomentumScheduler(net, bn_lambda=bn_lbmd, last_epoch=self.start_epoch-1)
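        # Worked example of the schedule above (illustrative values, the real ones
        # come from the config): with bn_decay_step=20 and bn_decay_rate=0.5, the
        # BN momentum is 0.5 for epochs 0-19, 0.25 for 20-39, 0.125 for 40-59, ...
        # and is clamped from below at BN_MOMENTUM_MAX=0.001 (i.e. 0.999 in the
        # TensorFlow convention noted above).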
def setup_logging(self):
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.WARN)
if self.is_master:
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
@staticmethod
def load_state_with_same_shape(model, weights):
model_state = model.state_dict()
if list(weights.keys())[0].startswith('module.'):
print("Loading multigpu weights with module. prefix...")
weights = {k.partition('module.')[2]:weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith('encoder.'):
logging.info("Loading multigpu weights with encoder. prefix...")
weights = {k.partition('encoder.')[2]:weights[k] for k in weights.keys()}
# print(weights.items())
filtered_weights = {
k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()
}
print("Loading weights:" + ', '.join(filtered_weights.keys()))
return filtered_weights
@staticmethod
def get_current_lr(epoch, config):
lr = config.optimizer.learning_rate
for i,lr_decay_epoch in enumerate(config.optimizer.lr_decay_steps):
if epoch >= lr_decay_epoch:
lr *= config.optimizer.lr_decay_rates[i]
return lr
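    # Worked example for get_current_lr (illustrative config values): with
    # learning_rate=0.001, lr_decay_steps=[80, 120] and lr_decay_rates=[0.1, 0.1],
    # epochs 0-79 train at 1e-3, epochs 80-119 at 1e-4, and epochs 120+ at 1e-5.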
@staticmethod
def adjust_learning_rate(optimizer, epoch, config):
lr = DetectionTrainer.get_current_lr(epoch, config)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def train_one_epoch(self, epoch_cnt):
stat_dict = {} # collect statistics
DetectionTrainer.adjust_learning_rate(self.optimizer, epoch_cnt, self.config)
self.bnm_scheduler.step() # decay BN momentum
self.net.train() # set model to training mode
for batch_idx, batch_data_label in enumerate(self.train_dataloader):
for key in batch_data_label:
if key == 'scan_name':
continue
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
self.optimizer.zero_grad()
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
end_points = self.net(inputs)
# Compute loss and gradients, update parameters.
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, self.dataset_config)
loss.backward()
self.optimizer.step()
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_interval = 10
if ((batch_idx+1) % batch_interval == 0) and self.is_master:
logging.info(' ---- batch: %03d ----' % (batch_idx+1))
for key in stat_dict:
self.writer.add_scalar('training/{}'.format(key), stat_dict[key]/batch_interval,
(epoch_cnt*len(self.train_dataloader)+batch_idx)*self.config.data.batch_size)
for key in sorted(stat_dict.keys()):
logging.info('mean %s: %f'%(key, stat_dict[key]/batch_interval))
stat_dict[key] = 0
def evaluate_one_epoch(self, epoch_cnt):
np.random.seed(0)
stat_dict = {} # collect statistics
ap_calculator = APCalculator(ap_iou_thresh=self.config.test.ap_iou, class2type_map=self.dataset_config.class2type)
self.net.eval() # set model to eval mode (for bn and dp)
for batch_idx, batch_data_label in enumerate(self.test_dataloader):
if batch_idx % 10 == 0:
logging.info('Eval batch: %d'%(batch_idx))
for key in batch_data_label:
if key == 'scan_name':
continue
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
with torch.no_grad():
end_points = self.net(inputs)
# Compute loss
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, self.dataset_config)
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT)
batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT)
ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# Dump evaluation results for visualization
if self.config.data.dump_results and batch_idx == 0 and epoch_cnt %10 == 0 and self.is_master:
dump_results(end_points, 'results', self.dataset_config)
        # Log statistics
        for key in sorted(stat_dict.keys()):
            logging.info('eval mean %s: %f' % (key, stat_dict[key] / float(batch_idx + 1)))
if self.is_master:
for key in sorted(stat_dict.keys()):
self.writer.add_scalar('validation/{}'.format(key), stat_dict[key]/float(batch_idx+1),
(epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)
# Evaluate average precision
metrics_dict = ap_calculator.compute_metrics()
for key in metrics_dict:
logging.info('eval %s: %f'%(key, metrics_dict[key]))
if self.is_master:
self.writer.add_scalar('validation/mAP{}'.format(self.config.test.ap_iou), metrics_dict['mAP'], (epoch_cnt+1)*len(self.train_dataloader)*self.config.data.batch_size)
#mean_loss = stat_dict['loss']/float(batch_idx+1)
return metrics_dict['mAP']
def train(self):
for epoch in range(self.start_epoch, self.config.optimizer.max_epoch):
logging.info('**** EPOCH %03d ****' % (epoch))
logging.info('Current learning rate: %f'%(DetectionTrainer.get_current_lr(epoch, self.config)))
logging.info('Current BN decay momentum: %f'%(self.bnm_scheduler.lmbd(self.bnm_scheduler.last_epoch)))
logging.info(str(datetime.now()))
# Reset numpy seed.
# REF: https://github.com/pytorch/pytorch/issues/5059
np.random.seed()
if get_world_size() > 1:
self.sampler.set_epoch(epoch)
self.train_one_epoch(epoch)
if epoch % 5 == 4 and self.is_master: # Eval every 5 epochs
best_mAP = self.evaluate_one_epoch(epoch)
if best_mAP > self.best_mAP:
self.best_mAP = best_mAP
# Save checkpoint
save_dict = {'epoch': epoch+1, # after training one epoch, the start_epoch should be epoch+1
'optimizer_state_dict': self.optimizer.state_dict(),
'best_mAP': self.best_mAP}
if get_world_size() > 1:
save_dict['state_dict'] = self.net.module.state_dict()
else:
save_dict['state_dict'] = self.net.state_dict()
torch.save(save_dict, 'checkpoint.tar')
OmegaConf.save(self.config, 'config.yaml')
@staticmethod
def write_to_benchmark(data, scene_name):
from models.ap_helper import flip_axis_back_camera
OBJ_CLASS_IDS = np.array([3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39])
os.makedirs('benchmark_output', exist_ok=True)
bsize = len(scene_name)
for bsize_ in range(bsize):
write_list = []
cur_data = data[bsize_]
cur_name = scene_name[bsize_]
for class_id, bbox, score in cur_data:
bbox = flip_axis_back_camera(bbox)
minx = np.min(bbox[:,0])
miny = np.min(bbox[:,1])
minz = np.min(bbox[:,2])
maxx = np.max(bbox[:,0])
maxy = np.max(bbox[:,1])
maxz = np.max(bbox[:,2])
write_list.append([minx, miny, minz, maxx,maxy, maxz, OBJ_CLASS_IDS[class_id], score])
np.savetxt(os.path.join('benchmark_output', cur_name+'.txt'), np.array(write_list))
def test(self):
if self.config.test.use_cls_nms:
assert(self.config.test.use_3d_nms)
AP_IOU_THRESHOLDS = self.config.test.ap_iou_thresholds
logging.info(str(datetime.now()))
# Reset numpy seed.
# REF: https://github.com/pytorch/pytorch/issues/5059
np.random.seed(0)
stat_dict = {}
ap_calculator_list = [APCalculator(iou_thresh, self.dataset_config.class2type) for iou_thresh in AP_IOU_THRESHOLDS]
self.net.eval() # set model to eval mode (for bn and dp)
for batch_idx, batch_data_label in enumerate(self.test_dataloader):
if batch_idx % 10 == 0:
print('Eval batch: %d'%(batch_idx))
for key in batch_data_label:
if key == 'scan_name':
continue
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
with torch.no_grad():
end_points = self.net(inputs)
# Compute loss
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, self.dataset_config)
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_pred_map_cls = parse_predictions(end_points, self.CONFIG_DICT_TEST)
batch_gt_map_cls = parse_groundtruths(end_points, self.CONFIG_DICT_TEST)
for ap_calculator in ap_calculator_list:
ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# debug
if self.config.test.write_to_benchmark:
#from lib.utils.io3d import write_triangle_mesh
#write_triangle_mesh(batch_data_label['point_clouds'][0].cpu().numpy(), None, None, batch_data_label['scan_name'][0]+'.ply')
DetectionTrainer.write_to_benchmark(batch_pred_map_cls, batch_data_label['scan_name'])
if self.config.test.save_vis:
dump_results_(end_points, 'visualization', self.dataset_config)
# Log statistics
for key in sorted(stat_dict.keys()):
logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))
# Evaluate average precision
if not self.config.test.write_to_benchmark:
for i, ap_calculator in enumerate(ap_calculator_list):
logging.info('-'*10 + 'iou_thresh: %f'%(AP_IOU_THRESHOLDS[i]) + '-'*10)
metrics_dict = ap_calculator.compute_metrics()
for key in metrics_dict:
logging.info('eval %s: %f'%(key, metrics_dict[key]))
mean_loss = stat_dict['loss']/float(batch_idx+1)
return mean_loss
| ContrastiveSceneContexts-main | downstream/votenet/lib/ddp_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import os
import time
import torch
import signal
import pickle
import threading
import random
import functools
import traceback
import torch.nn as nn
import torch.distributed as dist
import multiprocessing as mp
"""Multiprocessing error handler."""
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, sig_num, stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
"""Multiprocessing helpers."""
def run(proc_rank, world_size, port, error_queue, fun, fun_args, fun_kwargs):
"""Runs a function from a child process."""
try:
# Initialize the process group
init_process_group(proc_rank, world_size, port)
# Run the function
fun(*fun_args, **fun_kwargs)
except:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
destroy_process_group()
def multi_proc_run(num_proc, port, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, port, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
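# Illustrative usage sketch (not part of the original repo): multi_proc_run forks
# num_proc workers, each of which joins the NCCL process group on localhost:<port>
# before calling `fun`. Running it requires at least `num_proc` GPUs and, because
# the worker callable is pickled for mp.Process, a module-level function is the
# safest target. The worker below and the port number are assumptions for the demo.
def _example_worker(message):
    # Inside the worker the default process group is already initialized, so
    # get_rank()/get_world_size() and torch.distributed collectives are usable.
    print('rank %d/%d says: %s' % (get_rank(), get_world_size(), message))
def _example_multi_proc_run():
    # Spawn 2 workers that rendezvous on an arbitrary free port.
    multi_proc_run(num_proc=2, port=12355, fun=_example_worker, fun_args=('hello',))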
"""Distributed helpers."""
def is_master_proc(num_gpus):
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
When training using a single GPU, there is only one training processes
which is considered the master processes.
"""
return num_gpus == 1 or torch.distributed.get_rank() == 0
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather_differentiable(tensor):
"""
Run differentiable gather function for SparseConv features with variable number of points.
tensor: [num_points, feature_dim]
"""
world_size = get_world_size()
if world_size == 1:
return [tensor]
num_points, f_dim = tensor.size()
local_np = torch.LongTensor([num_points]).to("cuda")
np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(np_list, local_np)
np_list = [int(n.item()) for n in np_list]
max_np = max(np_list)
tensor_list = []
for _ in np_list:
tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda"))
if local_np != max_np:
padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float()
tensor = torch.cat((tensor, padding), dim=0)
assert tensor.size() == (max_np, f_dim)
dist.all_gather(tensor_list, tensor)
data_list = []
for gather_np, gather_tensor in zip(np_list, tensor_list):
gather_tensor = gather_tensor[:gather_np]
assert gather_tensor.size() == (gather_np, f_dim)
data_list.append(gather_tensor)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
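# Illustrative sketch (not part of the original repo): all_gather lets every rank
# collect arbitrary picklable objects from all other ranks by padding the pickled
# byte tensors to a common length. With world_size == 1 it simply returns [data],
# so it is also safe to call in non-distributed runs. The stats dict is hypothetical.
def _example_all_gather(local_stats):
    # local_stats could be e.g. {'num_boxes': 17, 'scene': 'scene0011_00'}
    gathered = all_gather(local_stats)      # list with one entry per rank
    total_boxes = sum(s['num_boxes'] for s in gathered)
    return total_boxes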
def init_process_group(proc_rank, world_size, port):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://{}:{}".format("localhost", str(port)),
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
| ContrastiveSceneContexts-main | downstream/votenet/lib/distributed.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Utility functions for metric evaluation.
Author: Or Litany and Charles R. Qi
"""
import os
import sys
import torch
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import numpy as np
# Mesh IO
import trimesh
# ----------------------------------------
# Precision and Recall
# ----------------------------------------
def multi_scene_precision_recall(labels, pred, iou_thresh, conf_thresh, label_mask, pred_mask=None):
'''
Args:
labels: (B, N, 6)
pred: (B, M, 6)
iou_thresh: scalar
conf_thresh: scalar
label_mask: (B, N,) with values in 0 or 1 to indicate which GT boxes to consider.
pred_mask: (B, M,) with values in 0 or 1 to indicate which PRED boxes to consider.
Returns:
TP,FP,FN,Precision,Recall
'''
# Make sure the masks are not Torch tensor, otherwise the mask==1 returns uint8 array instead
# of True/False array as in numpy
assert(not torch.is_tensor(label_mask))
assert(not torch.is_tensor(pred_mask))
TP, FP, FN = 0, 0, 0
if label_mask is None: label_mask = np.ones((labels.shape[0], labels.shape[1]))
if pred_mask is None: pred_mask = np.ones((pred.shape[0], pred.shape[1]))
for batch_idx in range(labels.shape[0]):
TP_i, FP_i, FN_i = single_scene_precision_recall(labels[batch_idx, label_mask[batch_idx,:]==1, :],
pred[batch_idx, pred_mask[batch_idx,:]==1, :],
iou_thresh, conf_thresh)
TP += TP_i
FP += FP_i
FN += FN_i
return TP, FP, FN, precision_recall(TP, FP, FN)
def single_scene_precision_recall(labels, pred, iou_thresh, conf_thresh):
"""Compute P and R for predicted bounding boxes. Ignores classes!
Args:
labels: (N x bbox) ground-truth bounding boxes (6 dims)
pred: (M x (bbox + conf)) predicted bboxes with confidence and maybe classification
Returns:
TP, FP, FN
"""
# for each pred box with high conf (C), compute IoU with all gt boxes.
# TP = number of times IoU > th ; FP = C - TP
# FN - number of scene objects without good match
gt_bboxes = labels[:, :6]
num_scene_bboxes = gt_bboxes.shape[0]
conf = pred[:, 6]
conf_pred_bbox = pred[np.where(conf > conf_thresh)[0], :6]
num_conf_pred_bboxes = conf_pred_bbox.shape[0]
# init an array to keep iou between generated and scene bboxes
iou_arr = np.zeros([num_conf_pred_bboxes, num_scene_bboxes])
for g_idx in range(num_conf_pred_bboxes):
for s_idx in range(num_scene_bboxes):
iou_arr[g_idx, s_idx] = calc_iou(conf_pred_bbox[g_idx ,:], gt_bboxes[s_idx, :])
good_match_arr = (iou_arr >= iou_thresh)
TP = good_match_arr.any(axis=1).sum()
FP = num_conf_pred_bboxes - TP
FN = num_scene_bboxes - good_match_arr.any(axis=0).sum()
return TP, FP, FN
def precision_recall(TP, FP, FN):
Prec = 1.0 * TP / (TP + FP) if TP+FP>0 else 0
Rec = 1.0 * TP / (TP + FN) if TP+FN>0 else 0
return Prec, Rec
def calc_iou(box_a, box_b):
"""Computes IoU of two axis aligned bboxes.
Args:
box_a, box_b: 6D of center and lengths
Returns:
iou
"""
max_a = box_a[0:3] + box_a[3:6]/2
max_b = box_b[0:3] + box_b[3:6]/2
min_max = np.array([max_a, max_b]).min(0)
min_a = box_a[0:3] - box_a[3:6]/2
min_b = box_b[0:3] - box_b[3:6]/2
max_min = np.array([min_a, min_b]).max(0)
if not ((min_max > max_min).all()):
return 0.0
intersection = (min_max - max_min).prod()
vol_a = box_a[3:6].prod()
vol_b = box_b[3:6].prod()
union = vol_a + vol_b - intersection
return 1.0*intersection / union
if __name__ == '__main__':
print('running some tests')
############
## Test IoU
############
box_a = np.array([0,0,0,1,1,1])
box_b = np.array([0,0,0,2,2,2])
expected_iou = 1.0/8
pred_iou = calc_iou(box_a, box_b)
assert expected_iou == pred_iou, 'function returned wrong IoU'
box_a = np.array([0,0,0,1,1,1])
box_b = np.array([10,10,10,2,2,2])
expected_iou = 0.0
pred_iou = calc_iou(box_a, box_b)
assert expected_iou == pred_iou, 'function returned wrong IoU'
print('IoU test -- PASSED')
#########################
## Test Precision Recall
#########################
gt_boxes = np.array([[0,0,0,1,1,1],[3, 0, 1, 1, 10, 1]])
detected_boxes = np.array([[0,0,0,1,1,1, 1.0],[3, 0, 1, 1, 10, 1, 0.9]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 2 and FP == 0 and FN == 0
assert precision_recall(TP, FP, FN) == (1, 1)
detected_boxes = np.array([[0,0,0,1,1,1, 1.0]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 1 and FP == 0 and FN == 1
assert precision_recall(TP, FP, FN) == (1, 0.5)
detected_boxes = np.array([[0,0,0,1,1,1, 1.0], [-1,-1,0,0.1,0.1,1, 1.0]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 1 and FP == 1 and FN == 1
assert precision_recall(TP, FP, FN) == (0.5, 0.5)
# wrong box has low confidence
detected_boxes = np.array([[0,0,0,1,1,1, 1.0], [-1,-1,0,0.1,0.1,1, 0.1]])
TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5, 0.5)
assert TP == 1 and FP == 0 and FN == 1
assert precision_recall(TP, FP, FN) == (1, 0.5)
print('Precision Recall test -- PASSED')
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/metric_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Generic Code for Object Detection Evaluation
Input:
For each class:
For each image:
Predictions: box, score
Groundtruths: box
Output:
For each class:
precision-recal and average precision
Author: Charles R. Qi
Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py
"""
import numpy as np
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
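# Worked example (illustrative, not part of the original file): with detections at
# recalls [0.5, 0.5, 1.0] and precisions [1.0, 0.67, 0.75], the non-VOC07 branch
# integrates the precision envelope over the recall segments 0->0.5 and 0.5->1.0,
# giving 0.5*1.0 + 0.5*0.75 = 0.875.
def _example_voc_ap():
    rec = np.array([0.5, 0.5, 1.0])
    prec = np.array([1.0, 0.67, 0.75])
    ap = voc_ap(rec, prec, use_07_metric=False)
    assert abs(ap - 0.875) < 1e-6
    return ap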
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
from lib.utils.metric_util import calc_iou # axis-aligned 3D box IoU
def get_iou(bb1, bb2):
""" Compute IoU of two bounding boxes.
** Define your box IoU function HERE **
"""
#pass
iou3d = calc_iou(bb1, bb2)
return iou3d
from lib.utils.box_util import box3d_iou
def get_iou_obb(bb1,bb2):
iou3d, iou2d = box3d_iou(bb1,bb2)
return iou3d
def get_iou_main(get_iou_func, args):
return get_iou_func(*args)
def eval_det_cls(pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for a single class.
Input:
pred: map of {img_id: [(bbox, score)]} where bbox is numpy array
gt: map of {img_id: [bbox]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if True use VOC07 11 point method
Output:
rec: numpy array of length nd
prec: numpy array of length nd
ap: scalar, average precision
"""
# construct gt objects
class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id])
det = [False] * len(bbox)
npos += len(bbox)
class_recs[img_id] = {'bbox': bbox, 'det': det}
# pad empty list to all other imgids
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {'bbox': np.array([]), 'det': []}
# construct dets
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for box,score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB) # (nd,4 or 8,3 or 6)
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
#if d%100==0: print(d)
R = class_recs[image_ids[d]]
bb = BB[d,...].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j,...]))
if iou > ovmax:
ovmax = iou
jmax = j
#print d, ovmax
if ovmax > ovthresh:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
#print('NPOS: ', npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def eval_det_cls_wrapper(arguments):
pred, gt, ovthresh, use_07_metric, get_iou_func = arguments
rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, get_iou_func)
return (rec, prec, ap)
def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
for classname in gt.keys():
print('Computing AP for class: ', classname)
rec[classname], prec[classname], ap[classname] = eval_det_cls(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func)
print(classname, ap[classname])
return rec, prec, ap
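# Input-format sketch (not part of the original file): eval_det and the
# multiprocessing variant below consume per-scan dictionaries keyed by a scan id,
# holding (classname, bbox[, score]) tuples; with the default axis-aligned IoU the
# boxes are 6-d center+lengths vectors. The scan id and class name are placeholders.
def _example_eval_det_inputs():
    gt_box = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])      # cx,cy,cz,dx,dy,dz
    pred_all = {'scan0': [('chair', gt_box, 0.9)]}          # classname, bbox, score
    gt_all = {'scan0': [('chair', gt_box)]}                 # classname, bbox
    rec, prec, ap = eval_det(pred_all, gt_all, ovthresh=0.25, get_iou_func=get_iou)
    assert abs(ap['chair'] - 1.0) < 1e-6                    # exact match -> AP of 1
    return rec, prec, ap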
from multiprocessing import Pool
def eval_det_multiprocessing(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou):
""" Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred: pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox,score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt: gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
p = Pool(processes=10)
ret_values = p.map(eval_det_cls_wrapper, [(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func) for classname in gt.keys() if classname in pred])
p.close()
for i, classname in enumerate(gt.keys()):
if classname in pred:
rec[classname], prec[classname], ap[classname] = ret_values[i]
else:
rec[classname] = 0
prec[classname] = 0
ap[classname] = 0
print(classname, ap[classname])
return rec, prec, ap
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/eval_det.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Chamfer distance in Pytorch.
Author: Charles R. Qi
"""
import torch
import torch.nn as nn
import numpy as np
def huber_loss(error, delta=1.0):
"""
Args:
error: Torch tensor (d1,d2,...,dk)
Returns:
loss: Torch tensor (d1,d2,...,dk)
x = error = pred - gt or dist(pred,gt)
0.5 * |x|^2 if |x|<=d
0.5 * d^2 + d * (|x|-d) if |x|>d
Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py
"""
abs_error = torch.abs(error)
#quadratic = torch.min(abs_error, torch.FloatTensor([delta]))
quadratic = torch.clamp(abs_error, max=delta)
linear = (abs_error - quadratic)
loss = 0.5 * quadratic**2 + delta * linear
return loss
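# Small sanity-check sketch (not part of the original file): for delta=1.0 the
# Huber loss is quadratic (0.5*x^2) for |x| <= 1 and linear (|x| - 0.5) beyond,
# e.g. an error of 0.5 gives 0.125 while an error of 3.0 gives 2.5.
def _example_huber_loss():
    err = torch.tensor([0.5, 3.0])
    loss = huber_loss(err, delta=1.0)
    assert torch.allclose(loss, torch.tensor([0.125, 2.5]))
    return loss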
def nn_distance(pc1, pc2, l1smooth=False, delta=1.0, l1=False):
"""
Input:
pc1: (B,N,C) torch tensor
pc2: (B,M,C) torch tensor
l1smooth: bool, whether to use l1smooth loss
delta: scalar, the delta used in l1smooth loss
Output:
dist1: (B,N) torch float32 tensor
idx1: (B,N) torch int64 tensor
dist2: (B,M) torch float32 tensor
idx2: (B,M) torch int64 tensor
"""
N = pc1.shape[1]
M = pc2.shape[1]
pc1_expand_tile = pc1.unsqueeze(2).repeat(1,1,M,1)
pc2_expand_tile = pc2.unsqueeze(1).repeat(1,N,1,1)
pc_diff = pc1_expand_tile - pc2_expand_tile
if l1smooth:
pc_dist = torch.sum(huber_loss(pc_diff, delta), dim=-1) # (B,N,M)
elif l1:
pc_dist = torch.sum(torch.abs(pc_diff), dim=-1) # (B,N,M)
else:
pc_dist = torch.sum(pc_diff**2, dim=-1) # (B,N,M)
dist1, idx1 = torch.min(pc_dist, dim=2) # (B,N)
dist2, idx2 = torch.min(pc_dist, dim=1) # (B,M)
return dist1, idx1, dist2, idx2
def demo_nn_distance():
np.random.seed(0)
pc1arr = np.random.random((1,5,3))
pc2arr = np.random.random((1,6,3))
pc1 = torch.from_numpy(pc1arr.astype(np.float32))
pc2 = torch.from_numpy(pc2arr.astype(np.float32))
dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2)
print(dist1)
print(idx1)
dist = np.zeros((5,6))
for i in range(5):
for j in range(6):
dist[i,j] = np.sum((pc1arr[0,i,:] - pc2arr[0,j,:]) ** 2)
print(dist)
print('-'*30)
print('L1smooth dists:')
dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2, True)
print(dist1)
print(idx1)
dist = np.zeros((5,6))
for i in range(5):
for j in range(6):
error = np.abs(pc1arr[0,i,:] - pc2arr[0,j,:])
quad = np.minimum(error, 1.0)
linear = error - quad
loss = 0.5*quad**2 + 1.0*linear
dist[i,j] = np.sum(loss)
print(dist)
if __name__ == '__main__':
demo_nn_distance()
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/nn_distance.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Utility functions for processing point clouds.
Author: Charles R. Qi and Or Litany
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Point cloud IO
import numpy as np
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
# Mesh IO
import trimesh
import math
import matplotlib.pyplot as pyplot
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
""" Input is NxC, output is num_samplexC
"""
if replace is None: replace = (pc.shape[0]<num_sample)
choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
if return_choices:
return pc[choices], choices
else:
return pc[choices]
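# Illustrative sketch (not part of the original file): when the cloud has fewer
# points than num_sample, random_sampling falls back to sampling with replacement,
# so the output always has exactly num_sample rows.
def _example_random_sampling():
    pc = np.random.rand(100, 3)
    sub = random_sampling(pc, 20)                                    # (20, 3), no repeats needed
    up, choices = random_sampling(pc[:8], 20, return_choices=True)   # (20, 3), with repeats
    assert sub.shape == (20, 3) and up.shape == (20, 3) and choices.shape == (20,)
    return sub, up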
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
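# Round-trip sketch (not part of the original file): voxelize a cloud that lies in
# [-radius, radius]^3, then read back the occupied cells. Note the recovered points
# are integer voxel indices in [0, vsize), not the original metric coordinates.
def _example_volume_round_trip():
    pts = np.random.uniform(-0.9, 0.9, size=(50, 3))
    vol = point_cloud_to_volume(pts, vsize=12, radius=1.0)
    occupied = volume_to_point_cloud(vol)    # (K, 3) voxel indices, K <= 50
    assert vol.shape == (12, 12, 12) and occupied.shape[1] == 3
    return occupied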
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize,vsize,vsize,num_sample,3))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i,j,k) not in loc2pc:
vol[i,j,k,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j,k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0]>num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
# Normalize
pc_center = (np.array([i,j,k])+0.5)*voxel - radius
pc = (pc - pc_center) / voxel # shift and scale
vol[i,j,k,:,:] = pc
return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b,:,:], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2*radius/float(imgsize)
locations = (points[:,0:2] + radius)/pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(imgsize):
for j in range(imgsize):
if (i,j) not in loc2pc:
img[i,j,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j)]
pc = np.vstack(pc)
if pc.shape[0]>num_sample:
pc = random_sampling(pc, num_sample, False)
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
pc_center = (np.array([i,j])+0.5)*pixel - radius
pc[:,0:2] = (pc[:,0:2] - pc_center)/pixel
img[i,j,:,:] = pc
return img
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_color(points, labels, filename, num_classes=None, colormap=pyplot.cm.jet):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels)+1
else:
assert(num_classes>np.max(labels))
vertex = []
#colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]
colors = [colormap(i/float(num_classes)) for i in range(num_classes)]
for i in range(N):
c = colors[labels[i]]
c = [int(x*255) for x in c]
vertex.append( (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]) )
vertex = np.array(vertex, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4'),('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=True).write(filename)
def write_ply_rgb(points, colors, out_filename, num_classes=None):
""" Color (N,3) points with RGB colors (N,3) within range [0,255] as OBJ file """
colors = colors.astype(int)
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
c = colors[i,:]
fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
fout.close()
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def rotate_point_cloud(points, rotation_matrix=None):
""" Input: (n,3), Output: (n,3) """
# Rotate in-place around Z axis.
if rotation_matrix is None:
rotation_angle = np.random.uniform() * 2 * np.pi
sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
rotation_matrix = np.array([[cosval, sinval, 0],
[-sinval, cosval, 0],
[0, 0, 1]])
ctr = points.mean(axis=0)
rotated_data = np.dot(points-ctr, rotation_matrix) + ctr
return rotated_data, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
''' Input pc is NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
'''
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval],[sinval, cosval]])
pc[:,[0,2]] = np.dot(pc[:,[0,2]], np.transpose(rotmat))
return pc
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape)+[3,3]))
c = np.cos(t)
s = np.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
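# Quick usage sketch (not part of the original file): rotz builds a
# counter-clockwise rotation about +Z, so rotating the +X unit vector by 90 degrees
# yields (approximately) the +Y unit vector.
def _example_rotz():
    R = rotz(np.pi / 2.0)
    v = np.array([1.0, 0.0, 0.0])
    assert np.allclose(R.dot(v), [0.0, 1.0, 0.0], atol=1e-9)
    return R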
# ----------------------------------------
# BBox
# ----------------------------------------
def bbox_corner_dist_measure(crnr1, crnr2):
""" compute distance between box corners to replace iou
Args:
crnr1, crnr2: Nx3 points of box corners in camera axis (y points down)
output is a scalar between 0 and 1
"""
dist = sys.maxsize
for y in range(4):
rows = ([(x+y)%4 for x in range(4)] + [4+(x+y)%4 for x in range(4)])
d_ = np.linalg.norm(crnr2[rows, :] - crnr1, axis=1).sum() / 8.0
if d_ < dist:
dist = d_
u = sum([np.linalg.norm(x[0,:] - x[6,:]) for x in [crnr1, crnr2]])/2.0
measure = max(1.0 - dist/u, 0)
print(measure)
return measure
def point_cloud_to_bbox(points):
""" Extract the axis aligned box from a pcl or batch of pcls
Args:
points: Nx3 points or BxNx3
output is 6 dim: xyz pos of center and 3 lengths
"""
which_dim = len(points.shape) - 2 # first dim if a single cloud and second if batch
mn, mx = points.min(which_dim), points.max(which_dim)
lengths = mx - mn
cntr = 0.5*(mn + mx)
return np.concatenate([cntr, lengths], axis=which_dim)
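# Illustrative sketch (not part of the original file): the returned 6-vector is
# (cx, cy, cz, dx, dy, dz), i.e. box center followed by full side lengths, which is
# the format write_bbox below expects.
def _example_point_cloud_to_bbox():
    pts = np.array([[0.0, 0.0, 0.0], [2.0, 4.0, 6.0]])
    box = point_cloud_to_bbox(pts)
    assert np.allclose(box, [1.0, 2.0, 3.0, 2.0, 4.0, 6.0])
    return box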
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
mesh_list.export(out_filename)
return
def write_oriented_bbox(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3,3))
rotmat[2,2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2,0:2] = np.array([[cosval, -sinval],[sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
trns[0:3,0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
mesh_list.export(out_filename)
return
def generate_bbox_mesh(bbox, output_file=None):
"""
bbox: np array (n, 6),
output_file: string
"""
def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10):
def compute_length_vec3(vec3):
return math.sqrt(vec3[0]*vec3[0] + vec3[1]*vec3[1] + vec3[2]*vec3[2])
def rotation(axis, angle):
rot = np.eye(4)
c = np.cos(-angle)
s = np.sin(-angle)
t = 1.0 - c
axis /= compute_length_vec3(axis)
x = axis[0]
y = axis[1]
z = axis[2]
rot[0,0] = 1 + t*(x*x-1)
rot[0,1] = z*s+t*x*y
rot[0,2] = -y*s+t*x*z
rot[1,0] = -z*s+t*x*y
rot[1,1] = 1+t*(y*y-1)
rot[1,2] = x*s+t*y*z
rot[2,0] = y*s+t*x*z
rot[2,1] = -x*s+t*y*z
rot[2,2] = 1+t*(z*z-1)
return rot
verts = []
indices = []
diff = (p1 - p0).astype(np.float32)
height = compute_length_vec3(diff)
for i in range(stacks+1):
for i2 in range(slices):
theta = i2 * 2.0 * math.pi / slices
pos = np.array([radius*math.cos(theta), radius*math.sin(theta), height*i/stacks])
verts.append(pos)
for i in range(stacks):
for i2 in range(slices):
i2p1 = math.fmod(i2 + 1, slices)
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2, i*slices + i2p1], dtype=np.uint32) )
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2p1, (i + 1)*slices + i2p1], dtype=np.uint32) )
transform = np.eye(4)
va = np.array([0, 0, 1], dtype=np.float32)
vb = diff
vb /= compute_length_vec3(vb)
axis = np.cross(vb, va)
angle = np.arccos(np.clip(np.dot(va, vb), -1, 1))
if angle != 0:
if compute_length_vec3(axis) == 0:
dotx = va[0]
if (math.fabs(dotx) != 1.0):
axis = np.array([1,0,0]) - dotx * va
else:
axis = np.array([0,1,0]) - va[1] * va
axis /= compute_length_vec3(axis)
transform = rotation(axis, -angle)
transform[:3,3] += p0
verts = [np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts]
verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts]
return verts, indices
def get_bbox_edges(bbox_min, bbox_max):
def get_bbox_verts(bbox_min, bbox_max):
verts = [
np.array([bbox_min[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_max[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_max[2]])
]
return verts
box_verts = get_bbox_verts(bbox_min, bbox_max)
edges = [
(box_verts[0], box_verts[1]),
(box_verts[1], box_verts[2]),
(box_verts[2], box_verts[3]),
(box_verts[3], box_verts[0]),
(box_verts[4], box_verts[5]),
(box_verts[5], box_verts[6]),
(box_verts[6], box_verts[7]),
(box_verts[7], box_verts[4]),
(box_verts[0], box_verts[4]),
(box_verts[1], box_verts[5]),
(box_verts[2], box_verts[6]),
(box_verts[3], box_verts[7])
]
return edges
radius = 0.02
offset = [0,0,0]
verts = []
indices = []
for box in bbox:
box_min = np.array([box[0], box[1], box[2]])
box_max = np.array([box[3], box[4], box[5]])
edges = get_bbox_edges(box_min, box_max)
for k in range(len(edges)):
cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
cur_num_verts = len(verts)
cyl_verts = [x + offset for x in cyl_verts]
cyl_ind = [x + cur_num_verts for x in cyl_ind]
verts.extend(cyl_verts)
indices.extend(cyl_ind)
return verts, indices
def write_oriented_bbox_(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def write_ply_mesh(verts, colors, indices, output_file):
if colors is None:
colors = np.zeros_like(verts)
if indices is None:
indices = []
file = open(output_file, 'w')
file.write('ply \n')
file.write('format ascii 1.0\n')
file.write('element vertex {:d}\n'.format(len(verts)))
file.write('property float x\n')
file.write('property float y\n')
file.write('property float z\n')
file.write('property uchar red\n')
file.write('property uchar green\n')
file.write('property uchar blue\n')
file.write('element face {:d}\n'.format(len(indices)))
file.write('property list uchar uint vertex_indices\n')
file.write('end_header\n')
for vert, color in zip(verts, colors):
file.write("{:f} {:f} {:f} {:d} {:d} {:d}\n".format(vert[0], vert[1], vert[2] , int(color[0]*255), int(color[1]*255), int(color[2]*255)))
for ind in indices:
file.write('3 {:d} {:d} {:d}\n'.format(ind[0], ind[1], ind[2]))
file.close()
def heading2rotmat(heading_angle):
rotmat = np.zeros((3,3))
rotmat[2,2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2,0:2] = np.array([[cosval, -sinval],[sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
trns[0:3,0:3] = heading2rotmat(box[6])
box = np.array([[-0.5,-0.5,-0.5, 0.5, 0.5, 0.5]])
box[:,0] = box[:,0] * lengths[0] + trns[0,3]
box[:,1] = box[:,1] * lengths[1] + trns[1,3]
box[:,2] = box[:,2] * lengths[2] + trns[2,3]
box[:,3] = box[:,3] * lengths[0] + trns[0,3]
box[:,4] = box[:,4] * lengths[1] + trns[1,3]
box[:,5] = box[:,5] * lengths[2] + trns[2,3]
vertices, indices = generate_bbox_mesh(box)
return vertices, indices
verts, inds = convert_oriented_box_to_trimesh_fmt(scene_bbox)
write_ply_mesh(verts, None, inds, out_filename)
return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
"""Export oriented (around Y axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Y axis.
Z forward, X rightward, Y downward. heading angle of positive X is 0,
heading angle of negative Z is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3,3))
rotmat[1,1] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0,:] = np.array([cosval, 0, sinval])
rotmat[2,:] = np.array([-sinval, 0, cosval])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3,3] = 1.0
trns[0:3,0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
mesh_list.export(out_filename)
return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
"""Create lines represented as cylinders connecting pairs of 3D points
Args:
pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
filename: (string) filename for the output mesh (ply) file
rad: radius for the cylinder
res: number of sections used to create the cylinder
"""
scene = trimesh.scene.Scene()
for src,tgt in pcl:
# compute line
vec = tgt - src
M = trimesh.geometry.align_vectors([0,0,1],vec, False)
vec = tgt - src # compute again since align_vectors modifies vec in-place!
M[:3,3] = 0.5*src + 0.5*tgt
height = np.sqrt(np.dot(vec, vec))
scene.add_geometry(trimesh.creation.cylinder(radius=rad, height=height, sections=res, transform=M))
mesh_list = trimesh.util.concatenate(scene.dump())
mesh_list.export('%s.ply'%(filename))
# ----------------------------------------
# Testing
# ----------------------------------------
if __name__ == '__main__':
print('running some tests')
############
## Test "write_lines_as_cylinders"
############
pcl = np.random.rand(32, 2, 3)
write_lines_as_cylinders(pcl, 'point_connectors')
input()
scene_bbox = np.zeros((1,7))
scene_bbox[0,3:6] = np.array([1,2,3]) # dx,dy,dz
scene_bbox[0,6] = np.pi/4 # 45 degrees
write_oriented_bbox(scene_bbox, 'single_obb_45degree.ply')
############
## Test point_cloud_to_bbox
############
pcl = np.random.rand(32, 16, 3)
pcl_bbox = point_cloud_to_bbox(pcl)
assert pcl_bbox.shape == (32, 6)
pcl = np.random.rand(16, 3)
pcl_bbox = point_cloud_to_bbox(pcl)
assert pcl_bbox.shape == (6,)
############
## Test corner distance
############
crnr1 = np.array([[2.59038660e+00, 8.96107932e-01, 4.73305349e+00],
[4.12281644e-01, 8.96107932e-01, 4.48046631e+00],
[2.97129656e-01, 8.96107932e-01, 5.47344275e+00],
[2.47523462e+00, 8.96107932e-01, 5.72602993e+00],
[2.59038660e+00, 4.41155793e-03, 4.73305349e+00],
[4.12281644e-01, 4.41155793e-03, 4.48046631e+00],
[2.97129656e-01, 4.41155793e-03, 5.47344275e+00],
[2.47523462e+00, 4.41155793e-03, 5.72602993e+00]])
crnr2 = crnr1
print(bbox_corner_dist_measure(crnr1, crnr2))
print('tests PASSED')
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/pc_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from pc_util import bbox_corner_dist_measure
# boxes are axis aligned 2D boxes of shape (n,5) in FLOAT numbers with (x1,y1,x2,y2,score)
''' Ref: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
Ref: https://github.com/vickyboy47/nms-python/blob/master/nms.py
'''
def nms_2d(boxes, overlap_threshold):
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
score = boxes[:,4]
area = (x2-x1)*(y2-y1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
suppress = [last-1]
for pos in range(last-1):
j = I[pos]
xx1 = max(x1[i],x1[j])
yy1 = max(y1[i],y1[j])
xx2 = min(x2[i],x2[j])
yy2 = min(y2[i],y2[j])
w = xx2-xx1
h = yy2-yy1
if (w>0 and h>0):
o = w*h/area[j]
print('Overlap is', o)
if (o>overlap_threshold):
suppress.append(pos)
I = np.delete(I,suppress)
return pick
def nms_2d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
score = boxes[:,4]
area = (x2-x1)*(y2-y1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[:last-1]])
yy1 = np.maximum(y1[i], y1[I[:last-1]])
xx2 = np.minimum(x2[i], x2[I[:last-1]])
yy2 = np.minimum(y2[i], y2[I[:last-1]])
w = np.maximum(0, xx2-xx1)
h = np.maximum(0, yy2-yy1)
if old_type:
o = (w*h)/area[I[:last-1]]
else:
inter = w*h
o = inter / (area[i] + area[I[:last-1]] - inter)
I = np.delete(I, np.concatenate(([last-1], np.where(o>overlap_threshold)[0])))
return pick
def nms_3d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:,0]
y1 = boxes[:,1]
z1 = boxes[:,2]
x2 = boxes[:,3]
y2 = boxes[:,4]
z2 = boxes[:,5]
score = boxes[:,6]
area = (x2-x1)*(y2-y1)*(z2-z1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[:last-1]])
yy1 = np.maximum(y1[i], y1[I[:last-1]])
zz1 = np.maximum(z1[i], z1[I[:last-1]])
xx2 = np.minimum(x2[i], x2[I[:last-1]])
yy2 = np.minimum(y2[i], y2[I[:last-1]])
zz2 = np.minimum(z2[i], z2[I[:last-1]])
l = np.maximum(0, xx2-xx1)
w = np.maximum(0, yy2-yy1)
h = np.maximum(0, zz2-zz1)
if old_type:
o = (l*w*h)/area[I[:last-1]]
else:
inter = l*w*h
o = inter / (area[i] + area[I[:last-1]] - inter)
I = np.delete(I, np.concatenate(([last-1], np.where(o>overlap_threshold)[0])))
return pick
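# Usage sketch (not part of the original file): boxes are axis-aligned and given as
# (x1, y1, z1, x2, y2, z2, score) rows; the function returns the indices of boxes
# that survive suppression. Two heavily overlapping boxes collapse to the
# higher-scoring one, while a disjoint box is kept.
def _example_nms_3d_faster():
    boxes = np.array([
        [0.00, 0.00, 0.00, 1.0, 1.0, 1.0, 0.90],   # kept (higher score)
        [0.05, 0.05, 0.05, 1.0, 1.0, 1.0, 0.60],   # suppressed by the box above
        [5.00, 5.00, 5.00, 6.0, 6.0, 6.0, 0.80],   # kept (no overlap)
    ])
    keep = nms_3d_faster(boxes, overlap_threshold=0.25)
    assert sorted(keep) == [0, 2]
    return keep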
def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
x1 = boxes[:,0]
y1 = boxes[:,1]
z1 = boxes[:,2]
x2 = boxes[:,3]
y2 = boxes[:,4]
z2 = boxes[:,5]
score = boxes[:,6]
cls = boxes[:,7]
area = (x2-x1)*(y2-y1)*(z2-z1)
I = np.argsort(score)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[:last-1]])
yy1 = np.maximum(y1[i], y1[I[:last-1]])
zz1 = np.maximum(z1[i], z1[I[:last-1]])
xx2 = np.minimum(x2[i], x2[I[:last-1]])
yy2 = np.minimum(y2[i], y2[I[:last-1]])
zz2 = np.minimum(z2[i], z2[I[:last-1]])
cls1 = cls[i]
cls2 = cls[I[:last-1]]
l = np.maximum(0, xx2-xx1)
w = np.maximum(0, yy2-yy1)
h = np.maximum(0, zz2-zz1)
if old_type:
o = (l*w*h)/area[I[:last-1]]
else:
inter = l*w*h
o = inter / (area[i] + area[I[:last-1]] - inter)
o = o * (cls1==cls2)
I = np.delete(I, np.concatenate(([last-1], np.where(o>overlap_threshold)[0])))
return pick
def nms_crnr_dist(boxes, conf, overlap_threshold):
I = np.argsort(conf)
pick = []
while (I.size!=0):
last = I.size
i = I[-1]
pick.append(i)
scores = []
for ind in I[:-1]:
scores.append(bbox_corner_dist_measure(boxes[i,:], boxes[ind, :]))
I = np.delete(I, np.concatenate(([last-1], np.where(np.array(scores)>overlap_threshold)[0])))
return pick
if __name__=='__main__':
a = np.random.random((100,5))
print(nms_2d(a,0.9))
print(nms_2d_faster(a,0.9))
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/nms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''Code adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix'''
import os
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
import sys
sys.path.append(BASE_DIR)
import tf_logger
class Visualizer():
def __init__(self, opt, name='train'):
# self.opt = opt
#self.logger = tf_logger.Logger(os.path.join(opt.logging_dir, opt.name))
#self.log_name = os.path.join(opt.checkpoint_dir, opt.name, 'loss_log.txt')
self.logger = tf_logger.Logger(os.path.join(opt.log_dir, name))
self.log_name = os.path.join(opt.log_dir, 'tf_visualizer_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
# |visuals|: dictionary of images to save
def log_images(self, visuals, step):
for label, image_numpy in visuals.items():
self.logger.image_summary(
label, [image_numpy], step)
# scalars: dictionary of scalar labels and values
def log_scalars(self, scalars, step):
for label, val in scalars.items():
self.logger.scalar_summary(label, val, step)
# scatter plots
def plot_current_points(self, points, disp_offset=10):
pass
# scalars: same format as |scalars| of plot_current_scalars
def print_current_scalars(self, epoch, i, scalars):
message = '(epoch: %d, iters: %d) ' % (epoch, i)
for k, v in scalars.items():
message += '%s: %.3f ' % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/tf_visualizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import trimesh
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144),
]
def write_triangle_mesh(vertices, colors, faces, outputFile):
mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False)
mesh.export(outputFile)
def read_triangle_mesh(filename):
mesh = trimesh.load_mesh(filename, process=False)
if isinstance(mesh, trimesh.PointCloud):
vertices = mesh.vertices
colors = mesh.colors
faces = None
elif isinstance(mesh, trimesh.Trimesh):
vertices = mesh.vertices
colors = mesh.visual.vertex_colors
faces = mesh.faces
return vertices, colors, faces
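# Illustrative sketch (not part of the original file): generate_bbox_mesh (defined
# just below) expects rows of (xmin, ymin, zmin, xmax, ymax, zmax, label_id) and
# returns cylinder-edge vertices/colors/faces that write_triangle_mesh can export.
# The output filename and the chosen nyu40 label id (5 = chair) are assumptions.
def _example_export_bbox_wireframe(out_file='bbox_wireframe.ply'):
    boxes = np.array([[0.0, 0.0, 0.0, 1.0, 2.0, 0.5, 5.0]])
    verts, colors, faces = generate_bbox_mesh(boxes)
    # colors are floats in [0, 1]; trimesh converts them to vertex colors on export.
    write_triangle_mesh(np.array(verts), np.array(colors), np.array(faces), out_file)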
def generate_bbox_mesh(bbox):
"""
bbox: np array (n, 7), last one is instance/label id
"""
def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10):
def compute_length_vec3(vec3):
return math.sqrt(vec3[0]*vec3[0] + vec3[1]*vec3[1] + vec3[2]*vec3[2])
def rotation(axis, angle):
rot = np.eye(4)
c = np.cos(-angle)
s = np.sin(-angle)
t = 1.0 - c
axis /= compute_length_vec3(axis)
x = axis[0]
y = axis[1]
z = axis[2]
rot[0,0] = 1 + t*(x*x-1)
rot[0,1] = z*s+t*x*y
rot[0,2] = -y*s+t*x*z
rot[1,0] = -z*s+t*x*y
rot[1,1] = 1+t*(y*y-1)
rot[1,2] = x*s+t*y*z
rot[2,0] = y*s+t*x*z
rot[2,1] = -x*s+t*y*z
rot[2,2] = 1+t*(z*z-1)
return rot
verts = []
indices = []
diff = (p1 - p0).astype(np.float32)
height = compute_length_vec3(diff)
for i in range(stacks+1):
for i2 in range(slices):
theta = i2 * 2.0 * math.pi / slices
pos = np.array([radius*math.cos(theta), radius*math.sin(theta), height*i/stacks])
verts.append(pos)
for i in range(stacks):
for i2 in range(slices):
i2p1 = math.fmod(i2 + 1, slices)
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2, i*slices + i2p1], dtype=np.uint32) )
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2p1, (i + 1)*slices + i2p1], dtype=np.uint32) )
transform = np.eye(4)
va = np.array([0, 0, 1], dtype=np.float32)
vb = diff
vb /= compute_length_vec3(vb)
axis = np.cross(vb, va)
angle = np.arccos(np.clip(np.dot(va, vb), -1, 1))
if angle != 0:
if compute_length_vec3(axis) == 0:
dotx = va[0]
if (math.fabs(dotx) != 1.0):
axis = np.array([1,0,0]) - dotx * va
else:
axis = np.array([0,1,0]) - va[1] * va
axis /= compute_length_vec3(axis)
transform = rotation(axis, -angle)
transform[:3,3] += p0
verts = [np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts]
verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts]
return verts, indices
def get_bbox_edges(bbox_min, bbox_max):
def get_bbox_verts(bbox_min, bbox_max):
verts = [
np.array([bbox_min[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_max[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_max[2]])
]
return verts
box_verts = get_bbox_verts(bbox_min, bbox_max)
edges = [
(box_verts[0], box_verts[1]),
(box_verts[1], box_verts[2]),
(box_verts[2], box_verts[3]),
(box_verts[3], box_verts[0]),
(box_verts[4], box_verts[5]),
(box_verts[5], box_verts[6]),
(box_verts[6], box_verts[7]),
(box_verts[7], box_verts[4]),
(box_verts[0], box_verts[4]),
(box_verts[1], box_verts[5]),
(box_verts[2], box_verts[6]),
(box_verts[3], box_verts[7])
]
return edges
radius = 0.02
offset = [0,0,0]
verts = []
indices = []
colors = []
for box in bbox:
box_min = np.array([box[0], box[1], box[2]])
box_max = np.array([box[3], box[4], box[5]])
r, g, b = create_color_palette()[int(box[6]%41)]
edges = get_bbox_edges(box_min, box_max)
for k in range(len(edges)):
cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
cur_num_verts = len(verts)
cyl_color = [[r/255.0,g/255.0,b/255.0] for _ in cyl_verts]
cyl_verts = [x + offset for x in cyl_verts]
cyl_ind = [x + cur_num_verts for x in cyl_ind]
verts.extend(cyl_verts)
indices.extend(cyl_ind)
colors.extend(cyl_color)
return verts, colors, indices
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/io3d.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tensorflow as tf
import numpy as np
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
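# Note: this logger is written against the TensorFlow 1.x summary API
# (tf.summary.FileWriter / tf.Summary protos), and image_summary additionally uses
# the long-deprecated scipy.misc.toimage, so it assumes an older TF1/SciPy setup.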
class Logger(object):
def __init__(self, log_dir):
"""Create a summary writer logging to log_dir."""
self.writer = tf.summary.FileWriter(log_dir)
def scalar_summary(self, tag, value, step):
"""Log a scalar variable."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
def image_summary(self, tag, images, step):
"""Log a list of images."""
img_summaries = []
for i, img in enumerate(images):
# Write the image to a string
try:
s = StringIO()
except NameError:  # Python 3: StringIO was not imported above, fall back to BytesIO
s = BytesIO()
scipy.misc.toimage(img).save(s, format="png")
# Create an Image object
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=img.shape[0],
width=img.shape[1])
# Create a Summary value
img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
# Create and write Summary
summary = tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
def histo_summary(self, tag, values, step, bins=1000):
"""Log a histogram of the tensor of values."""
# Create a histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill the fields of the histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
# Drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.writer.add_summary(summary, step)
self.writer.flush()
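# Minimal usage sketch (illustrative only; './logs', loss_value, weights_array and
# step are placeholders, not part of this module):
#   logger = Logger('./logs')
#   logger.scalar_summary('train/loss', loss_value, step)
#   logger.histo_summary('weights/layer1', weights_array, step)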
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/tf_logger.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Helper functions for calculating 2D and 3D bounding box IoU.
Collected and written by Charles R. Qi
Last modified: Jul 2019
"""
from __future__ import print_function
import numpy as np
from scipy.spatial import ConvexHull
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
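# Sutherland-Hodgman clipping: walk the edges (cp1 -> cp2) of the convex clip
# polygon, keeping subject vertices that lie inside each edge and inserting the
# edge intersection whenever the subject polygon crosses it.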
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return(outputList)
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its area (ConvexHull.volume is the area in 2D)
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def is_clockwise(p):
x = p[:,0]
y = p[:,1]
return np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)) > 0
def box3d_iou(corners1, corners2):
''' Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
todo (rqi): add more description on corner points' orders.
'''
# corner points are in counter clockwise order
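# Bird's-eye-view IoU: project both boxes onto the XZ plane, intersect the two
# convex quadrilaterals, then extrude the overlap along Y by the overlapping
# height to get the 3D intersection volume.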
rect1 = [(corners1[i,0], corners1[i,2]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,2]) for i in range(3,-1,-1)]
area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[0,1], corners2[0,1])
ymin = max(corners1[4,1], corners2[4,1])
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two 2D bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1['x1'] < bb1['x2']
assert bb1['y1'] < bb1['y2']
assert bb2['x1'] < bb2['x2']
assert bb2['y1'] < bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
def box2d_iou(box1, box2):
''' Compute 2D bounding box IoU.
Input:
box1: tuple of (xmin,ymin,xmax,ymax)
box2: tuple of (xmin,ymin,xmax,ymax)
Output:
iou: 2D IoU scalar
'''
return get_iou({'x1':box1[0], 'y1':box1[1], 'x2':box1[2], 'y2':box1[3]}, \
{'x1':box2[0], 'y1':box2[1], 'x2':box2[2], 'y2':box2[3]})
# -----------------------------------------------------------
# Convert from box parameters to
# -----------------------------------------------------------
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape)+[3,3]))
c = np.cos(t)
s = np.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def get_3d_box(box_size, heading_angle, center):
''' box_size is array(l,w,h), heading_angle is the rotation in radians, clockwise from the pos x axis, center is xyz of box center
output (8,3) array for 3D box corners
Similar to utils/compute_orientation_3d
'''
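# Corner ordering: in the box's local frame, corners 0-3 lie on the y = +h/2 face
# and corners 4-7 on the y = -h/2 face, with corner i sharing its x/z offsets
# with corner i+4.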
R = roty(heading_angle)
l,w,h = box_size
x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2];
y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2];
z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2];
corners_3d = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
corners_3d[0,:] = corners_3d[0,:] + center[0];
corners_3d[1,:] = corners_3d[1,:] + center[1];
corners_3d[2,:] = corners_3d[2,:] + center[2];
corners_3d = np.transpose(corners_3d)
return corners_3d
def get_3d_box_batch(box_size, heading_angle, center):
''' box_size: [x1,x2,...,xn,3]
heading_angle: [x1,x2,...,xn]
center: [x1,x2,...,xn,3]
Return:
[x1,x3,...,xn,8,3]
'''
input_shape = heading_angle.shape
R = roty_batch(heading_angle)
l = np.expand_dims(box_size[...,0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[...,1], -1)
h = np.expand_dims(box_size[...,2], -1)
corners_3d = np.zeros(tuple(list(input_shape)+[8,3]))
corners_3d[...,:,0] = np.concatenate((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2), -1)
corners_3d[...,:,1] = np.concatenate((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2), -1)
corners_3d[...,:,2] = np.concatenate((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2), -1)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape)+1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
if __name__=='__main__':
# Function for polygon ploting
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
def plot_polys(plist,scale=500.0):
fig, ax = plt.subplots()
patches = []
for p in plist:
poly = Polygon(np.array(p)/scale, True)
patches.append(poly)
pc = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.5)
colors = 100*np.random.rand(len(patches))
pc.set_array(np.array(colors))
ax.add_collection(pc)
plt.show()
# Demo on ConvexHull
points = np.random.rand(30, 2) # 30 random points in 2-D
hull = ConvexHull(points)
# **In 2D "volume" is is area, "area" is perimeter
print(('Hull area: ', hull.volume))
for simplex in hull.simplices:
print(simplex)
# Demo on convex hull overlaps
sub_poly = [(0,0),(300,0),(300,300),(0,300)]
clip_poly = [(150,150),(300,300),(150,450),(0,300)]
inter_poly = polygon_clip(sub_poly, clip_poly)
print(poly_area(np.array(inter_poly)[:,0], np.array(inter_poly)[:,1]))
# Test convex hull interaction function
rect1 = [(50,0),(50,300),(300,300),(300,0)]
rect2 = [(150,150),(300,300),(150,450),(0,300)]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
if inter is not None:
print(poly_area(np.array(inter)[:,0], np.array(inter)[:,1]))
print('------------------')
rect1 = [(0.30026005199835404, 8.9408694211408424), \
(-1.1571105364358421, 9.4686676477075533), \
(0.1777082043006144, 13.154404877812102), \
(1.6350787927348105, 12.626606651245391)]
rect1 = [rect1[0], rect1[3], rect1[2], rect1[1]]
rect2 = [(0.23908745901608636, 8.8551095691132886), \
(-1.2771419487733995, 9.4269062966181956), \
(0.13138836963152717, 13.161896351296868), \
(1.647617777421013, 12.590099623791961)]
rect2 = [rect2[0], rect2[3], rect2[2], rect2[1]]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
| ContrastiveSceneContexts-main | downstream/votenet/lib/utils/box_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob, os
import numpy as np
import cv2
import argparse
from plyfile import PlyData, PlyElement
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--input_path', required=True, help='path to sens file to read')
parser.add_argument('--output_path', required=True, help='path to output folder')
parser.add_argument('--save_npz', action='store_true')
opt = parser.parse_args()
print(opt)
if not os.path.exists(opt.output_path):
os.mkdir(opt.output_path)
# Load Depth Camera Intrinsic
depth_intrinsic = np.loadtxt(opt.input_path + '/intrinsic/intrinsic_depth.txt')
print('Depth intrinsic: ')
print(depth_intrinsic)
# Compute Camera Distance (just for demo, so you can choose the camera distance in frame sampling)
poses = sorted(glob.glob(opt.input_path + '/pose/*.txt'), key=lambda a: int(os.path.basename(a).split('.')[0]))
depths = sorted(glob.glob(opt.input_path + '/depth/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0]))
colors = sorted(glob.glob(opt.input_path + '/color/*.png'), key=lambda a: int(os.path.basename(a).split('.')[0]))
# Get Aligned Point Clouds.
for ind, (pose, depth, color) in enumerate(zip(poses, depths, colors)):
name = os.path.basename(pose).split('.')[0]
if os.path.exists(opt.output_path + '/{}.npz'.format(name)):
continue
try:
print('='*50, ': {}'.format(pose))
depth_img = cv2.imread(depth, -1) # read 16bit grayscale image
mask = (depth_img != 0)
color_image = cv2.imread(color)
color_image = cv2.resize(color_image, (640, 480))
color_image = np.reshape(color_image[mask], [-1,3])
colors = np.zeros_like(color_image)
colors[:,0] = color_image[:,2]
colors[:,1] = color_image[:,1]
colors[:,2] = color_image[:,0]
pose = np.loadtxt(poses[ind])
print('Camera pose: ')
print(pose)
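# Back-project the depth map into a point cloud: ScanNet depth PNGs store
# millimeters (hence depth_shift = 1000), each valid pixel is unprojected with the
# pinhole model X = (u - cx) * z / fx, Y = (v - cy) * z / fy, and the resulting
# homogeneous points are mapped to world coordinates with the camera-to-world pose.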
depth_shift = 1000.0
x,y = np.meshgrid(np.linspace(0,depth_img.shape[1]-1,depth_img.shape[1]), np.linspace(0,depth_img.shape[0]-1,depth_img.shape[0]))
uv_depth = np.zeros((depth_img.shape[0], depth_img.shape[1], 3))
uv_depth[:,:,0] = x
uv_depth[:,:,1] = y
uv_depth[:,:,2] = depth_img/depth_shift
uv_depth = np.reshape(uv_depth, [-1,3])
uv_depth = uv_depth[np.where(uv_depth[:,2]!=0),:].squeeze()
intrinsic_inv = np.linalg.inv(depth_intrinsic)
fx = depth_intrinsic[0,0]
fy = depth_intrinsic[1,1]
cx = depth_intrinsic[0,2]
cy = depth_intrinsic[1,2]
bx = depth_intrinsic[0,3]
by = depth_intrinsic[1,3]
point_list = []
n = uv_depth.shape[0]
points = np.ones((n,4))
X = (uv_depth[:,0]-cx)*uv_depth[:,2]/fx + bx
Y = (uv_depth[:,1]-cy)*uv_depth[:,2]/fy + by
points[:,0] = X
points[:,1] = Y
points[:,2] = uv_depth[:,2]
points_world = np.dot(points, np.transpose(pose))
print(points_world.shape)
pcd_save = np.zeros((points_world.shape[0], 7))
pcd_save[:,:3] = points_world[:,:3]
pcd_save[:,3:6] = colors
print('Saving npz file...')
np.savez(opt.output_path + '/{}.npz'.format(name), pcd=pcd_save)
except:
continue
| ContrastiveSceneContexts-main | pretrain/scannet_pair/point_cloud_extractor.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob, os, sys
from SensorData import SensorData
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--target_dir', required=True, help='path to the target dir')
opt = parser.parse_args()
print(opt)
def main():
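# Gather every per-scene pcd/overlap.txt written by the overlap computation step and
# keep only frame pairs whose overlap ratio is at least 0.3 in a single overlap30.txt.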
overlaps = glob.glob(os.path.join(opt.target_dir, "*/pcd/overlap.txt"))
with open(os.path.join(opt.target_dir, 'overlap30.txt'), 'w') as f:
for fo in overlaps:
for line in open(fo):
pcd0, pcd1, op = line.strip().split()
if float(op) >= 0.3:
print('{} {} {}'.format(pcd0, pcd1, op), file=f)
print('done')
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | pretrain/scannet_pair/generage_list.py |
import os, struct
import numpy as np
import zlib
import imageio
import cv2
COMPRESSION_TYPE_COLOR = {-1:'unknown', 0:'raw', 1:'png', 2:'jpeg'}
COMPRESSION_TYPE_DEPTH = {-1:'unknown', 0:'raw_ushort', 1:'zlib_ushort', 2:'occi_ushort'}
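# A single RGB-D frame inside a ScanNet .sens stream: a 4x4 camera-to-world pose,
# color/depth timestamps, and the compressed color (JPEG) and depth (zlib-compressed
# uint16) payloads.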
class RGBDFrame():
def load(self, file_handle):
self.camera_to_world = np.asarray(struct.unpack('f'*16, file_handle.read(16*4)), dtype=np.float32).reshape(4, 4)
self.timestamp_color = struct.unpack('Q', file_handle.read(8))[0]
self.timestamp_depth = struct.unpack('Q', file_handle.read(8))[0]
self.color_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
self.depth_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
self.color_data = b''.join(struct.unpack('c'*self.color_size_bytes, file_handle.read(self.color_size_bytes)))
self.depth_data = b''.join(struct.unpack('c'*self.depth_size_bytes, file_handle.read(self.depth_size_bytes)))
def decompress_depth(self, compression_type):
if compression_type == 'zlib_ushort':
return self.decompress_depth_zlib()
else:
raise ValueError('unsupported depth compression type: {}'.format(compression_type))
def decompress_depth_zlib(self):
return zlib.decompress(self.depth_data)
def decompress_color(self, compression_type):
if compression_type == 'jpeg':
return self.decompress_color_jpeg()
else:
raise ValueError('unsupported color compression type: {}'.format(compression_type))
def decompress_color_jpeg(self):
return imageio.imread(self.color_data)
class SensorData:
def __init__(self, filename):
self.version = 4
self.load(filename)
def load(self, filename):
with open(filename, 'rb') as f:
version = struct.unpack('I', f.read(4))[0]
assert self.version == version
strlen = struct.unpack('Q', f.read(8))[0]
self.sensor_name = b''.join(struct.unpack('c'*strlen, f.read(strlen)))
self.intrinsic_color = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.extrinsic_color = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.intrinsic_depth = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.extrinsic_depth = np.asarray(struct.unpack('f'*16, f.read(16*4)), dtype=np.float32).reshape(4, 4)
self.color_compression_type = COMPRESSION_TYPE_COLOR[struct.unpack('i', f.read(4))[0]]
self.depth_compression_type = COMPRESSION_TYPE_DEPTH[struct.unpack('i', f.read(4))[0]]
self.color_width = struct.unpack('I', f.read(4))[0]
self.color_height = struct.unpack('I', f.read(4))[0]
self.depth_width = struct.unpack('I', f.read(4))[0]
self.depth_height = struct.unpack('I', f.read(4))[0]
self.depth_shift = struct.unpack('f', f.read(4))[0]
num_frames = struct.unpack('Q', f.read(8))[0]
self.frames = []
for i in range(num_frames):
frame = RGBDFrame()
frame.load(f)
self.frames.append(frame)
def export_depth_images(self, output_path, image_size=None, frame_skip=1):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting', len(self.frames)//frame_skip, ' depth frames to', output_path)
for f in range(0, len(self.frames), frame_skip):
if os.path.exists((os.path.join(output_path, str(f) + '.png'))):
continue
if f % 100 == 0:
print('exporting', f, 'th depth frames to', os.path.join(output_path, str(f) + '.png'))
depth_data = self.frames[f].decompress_depth(self.depth_compression_type)
depth = np.frombuffer(depth_data, dtype=np.uint16).reshape(self.depth_height, self.depth_width)  # np.fromstring is deprecated for binary data
if image_size is not None:
depth = cv2.resize(depth, (image_size[1], image_size[0]), interpolation=cv2.INTER_NEAREST)
imageio.imwrite(os.path.join(output_path, str(f) + '.png'), depth)
def export_color_images(self, output_path, image_size=None, frame_skip=1):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting', len(self.frames)//frame_skip, 'color frames to', output_path)
for f in range(0, len(self.frames), frame_skip):
if os.path.exists((os.path.join(output_path, str(f) + '.png'))):
continue
if f % 100 == 0:
print('exporting', f, 'th color frames to', os.path.join(output_path, str(f) + '.png'))
color = self.frames[f].decompress_color(self.color_compression_type)
if image_size is not None:
color = cv2.resize(color, (image_size[1], image_size[0]), interpolation=cv2.INTER_NEAREST)
# imageio.imwrite(os.path.join(output_path, str(f) + '.jpg'), color)
imageio.imwrite(os.path.join(output_path, str(f) + '.png'), color)
def save_mat_to_file(self, matrix, filename):
with open(filename, 'w') as f:
for line in matrix:
np.savetxt(f, line[np.newaxis], fmt='%f')
def export_poses(self, output_path, frame_skip=1):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting', len(self.frames)//frame_skip, 'camera poses to', output_path)
for f in range(0, len(self.frames), frame_skip):
self.save_mat_to_file(self.frames[f].camera_to_world, os.path.join(output_path, str(f) + '.txt'))
def export_intrinsics(self, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path)
print('exporting camera intrinsics to', output_path)
self.save_mat_to_file(self.intrinsic_color, os.path.join(output_path, 'intrinsic_color.txt'))
self.save_mat_to_file(self.extrinsic_color, os.path.join(output_path, 'extrinsic_color.txt'))
self.save_mat_to_file(self.intrinsic_depth, os.path.join(output_path, 'intrinsic_depth.txt'))
self.save_mat_to_file(self.extrinsic_depth, os.path.join(output_path, 'extrinsic_depth.txt'))
| ContrastiveSceneContexts-main | pretrain/scannet_pair/SensorData.py |
# Copyright 2014 Darsh Ranjan
#
# This file is part of python-plyfile.
#
# python-plyfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# python-plyfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-plyfile. If not, see
# <http://www.gnu.org/licenses/>.
from itertools import islice as _islice
import numpy as _np
from sys import byteorder as _byteorder
try:
_range = xrange
except NameError:
_range = range
# Many-many relation
_data_type_relation = [
('int8', 'i1'),
('char', 'i1'),
('uint8', 'u1'),
('uchar', 'b1'),
('uchar', 'u1'),
('int16', 'i2'),
('short', 'i2'),
('uint16', 'u2'),
('ushort', 'u2'),
('int32', 'i4'),
('int', 'i4'),
('uint32', 'u4'),
('uint', 'u4'),
('float32', 'f4'),
('float', 'f4'),
('float64', 'f8'),
('double', 'f8')
]
_data_types = dict(_data_type_relation)
_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation)
_types_list = []
_types_set = set()
for (_a, _b) in _data_type_relation:
if _a not in _types_set:
_types_list.append(_a)
_types_set.add(_a)
if _b not in _types_set:
_types_list.append(_b)
_types_set.add(_b)
_byte_order_map = {
'ascii': '=',
'binary_little_endian': '<',
'binary_big_endian': '>'
}
_byte_order_reverse = {
'<': 'binary_little_endian',
'>': 'binary_big_endian'
}
_native_byte_order = {'little': '<', 'big': '>'}[_byteorder]
def _lookup_type(type_str):
if type_str not in _data_type_reverse:
try:
type_str = _data_types[type_str]
except KeyError:
raise ValueError("field type %r not in %r" %
(type_str, _types_list))
return _data_type_reverse[type_str]
def _split_line(line, n):
fields = line.split(None, n)
if len(fields) == n:
fields.append('')
assert len(fields) == n + 1
return fields
def make2d(array, cols=None, dtype=None):
'''
Make a 2D array from an array of arrays. The `cols' and `dtype'
arguments can be omitted if the array is not empty.
'''
if (cols is None or dtype is None) and not len(array):
raise RuntimeError("cols and dtype must be specified for empty "
"array")
if cols is None:
cols = len(array[0])
if dtype is None:
dtype = array[0].dtype
return _np.fromiter(array, [('_', dtype, (cols,))],
count=len(array))['_']
class PlyParseError(Exception):
'''
Raised when a PLY file cannot be parsed.
The attributes `element', `row', `property', and `message' give
additional information.
'''
def __init__(self, message, element=None, row=None, prop=None):
self.message = message
self.element = element
self.row = row
self.prop = prop
s = ''
if self.element:
s += 'element %r: ' % self.element.name
if self.row is not None:
s += 'row %d: ' % self.row
if self.prop:
s += 'property %r: ' % self.prop.name
s += self.message
Exception.__init__(self, s)
def __repr__(self):
return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' %
(self.message, self.element, self.row, self.prop))
class PlyData(object):
'''
PLY file header and data.
A PlyData instance is created in one of two ways: by the static
method PlyData.read (to read a PLY file), or directly from __init__
given a sequence of elements (which can then be written to a PLY
file).
'''
def __init__(self, elements=[], text=False, byte_order='=',
comments=[], obj_info=[]):
'''
elements: sequence of PlyElement instances.
text: whether the resulting PLY file will be text (True) or
binary (False).
byte_order: '<' for little-endian, '>' for big-endian, or '='
for native. This is only relevant if `text' is False.
comments: sequence of strings that will be placed in the header
between the 'ply' and 'format ...' lines.
obj_info: like comments, but will be placed in the header with
"obj_info ..." instead of "comment ...".
'''
if byte_order == '=' and not text:
byte_order = _native_byte_order
self.byte_order = byte_order
self.text = text
self.comments = list(comments)
self.obj_info = list(obj_info)
self.elements = elements
def _get_elements(self):
return self._elements
def _set_elements(self, elements):
self._elements = tuple(elements)
self._index()
elements = property(_get_elements, _set_elements)
def _get_byte_order(self):
return self._byte_order
def _set_byte_order(self, byte_order):
if byte_order not in ['<', '>', '=']:
raise ValueError("byte order must be '<', '>', or '='")
self._byte_order = byte_order
byte_order = property(_get_byte_order, _set_byte_order)
def _index(self):
self._element_lookup = dict((elt.name, elt) for elt in
self._elements)
if len(self._element_lookup) != len(self._elements):
raise ValueError("two elements with same name")
@staticmethod
def _parse_header(stream):
'''
Parse a PLY header from a readable file-like stream.
'''
lines = []
comments = {'comment': [], 'obj_info': []}
while True:
line = stream.readline().decode('ascii').strip()
fields = _split_line(line, 1)
if fields[0] == 'end_header':
break
elif fields[0] in comments.keys():
lines.append(fields)
else:
lines.append(line.split())
a = 0
if lines[a] != ['ply']:
raise PlyParseError("expected 'ply'")
a += 1
while lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
if lines[a][0] != 'format':
raise PlyParseError("expected 'format'")
if lines[a][2] != '1.0':
raise PlyParseError("expected version '1.0'")
if len(lines[a]) != 3:
raise PlyParseError("too many fields after 'format'")
fmt = lines[a][1]
if fmt not in _byte_order_map:
raise PlyParseError("don't understand format %r" % fmt)
byte_order = _byte_order_map[fmt]
text = fmt == 'ascii'
a += 1
while a < len(lines) and lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
return PlyData(PlyElement._parse_multi(lines[a:]),
text, byte_order,
comments['comment'], comments['obj_info'])
@staticmethod
def read(stream):
'''
Read PLY data from a readable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'read')
try:
data = PlyData._parse_header(stream)
for elt in data:
elt._read(stream, data.text, data.byte_order)
finally:
if must_close:
stream.close()
return data
def write(self, stream):
'''
Write PLY data to a writeable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'write')
try:
stream.write(self.header.encode('ascii'))
stream.write(b'\r\n')
for elt in self:
elt._write(stream, self.text, self.byte_order)
finally:
if must_close:
stream.close()
@property
def header(self):
'''
Provide PLY-formatted metadata for the instance.
'''
lines = ['ply']
if self.text:
lines.append('format ascii 1.0')
else:
lines.append('format ' +
_byte_order_reverse[self.byte_order] +
' 1.0')
# Some information is lost here, since all comments are placed
# between the 'format' line and the first element.
for c in self.comments:
lines.append('comment ' + c)
for c in self.obj_info:
lines.append('obj_info ' + c)
lines.extend(elt.header for elt in self.elements)
lines.append('end_header')
return '\r\n'.join(lines)
def __iter__(self):
return iter(self.elements)
def __len__(self):
return len(self.elements)
def __contains__(self, name):
return name in self._element_lookup
def __getitem__(self, name):
return self._element_lookup[name]
def __str__(self):
return self.header
def __repr__(self):
return ('PlyData(%r, text=%r, byte_order=%r, '
'comments=%r, obj_info=%r)' %
(self.elements, self.text, self.byte_order,
self.comments, self.obj_info))
def _open_stream(stream, read_or_write):
if hasattr(stream, read_or_write):
return (False, stream)
try:
return (True, open(stream, read_or_write[0] + 'b'))
except TypeError:
raise RuntimeError("expected open file or filename")
class PlyElement(object):
'''
PLY file element.
A client of this library doesn't normally need to instantiate this
directly, so the following is only for the sake of documenting the
internals.
Creating a PlyElement instance is generally done in one of two ways:
as a byproduct of PlyData.read (when reading a PLY file) and by
PlyElement.describe (before writing a PLY file).
'''
def __init__(self, name, properties, count, comments=[]):
'''
This is not part of the public interface. The preferred methods
of obtaining PlyElement instances are PlyData.read (to read from
a file) and PlyElement.describe (to construct from a numpy
array).
'''
self._name = str(name)
self._check_name()
self._count = count
self._properties = tuple(properties)
self._index()
self.comments = list(comments)
self._have_list = any(isinstance(p, PlyListProperty)
for p in self.properties)
@property
def count(self):
return self._count
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
self._count = len(data)
self._check_sanity()
data = property(_get_data, _set_data)
def _check_sanity(self):
for prop in self.properties:
if prop.name not in self._data.dtype.fields:
raise ValueError("dangling property %r" % prop.name)
def _get_properties(self):
return self._properties
def _set_properties(self, properties):
self._properties = tuple(properties)
self._check_sanity()
self._index()
properties = property(_get_properties, _set_properties)
def _index(self):
self._property_lookup = dict((prop.name, prop)
for prop in self._properties)
if len(self._property_lookup) != len(self._properties):
raise ValueError("two properties with same name")
def ply_property(self, name):
return self._property_lookup[name]
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "element name %r contains spaces" % self._name
raise ValueError(msg)
def dtype(self, byte_order='='):
'''
Return the numpy dtype of the in-memory representation of the
data. (If there are no list properties, and the PLY format is
binary, then this also accurately describes the on-disk
representation of the element.)
'''
return [(prop.name, prop.dtype(byte_order))
for prop in self.properties]
@staticmethod
def _parse_multi(header_lines):
'''
Parse a list of PLY element definitions.
'''
elements = []
while header_lines:
(elt, header_lines) = PlyElement._parse_one(header_lines)
elements.append(elt)
return elements
@staticmethod
def _parse_one(lines):
'''
Consume one element definition. The unconsumed input is
returned along with a PlyElement instance.
'''
a = 0
line = lines[a]
if line[0] != 'element':
raise PlyParseError("expected 'element'")
if len(line) > 3:
raise PlyParseError("too many fields after 'element'")
if len(line) < 3:
raise PlyParseError("too few fields after 'element'")
(name, count) = (line[1], int(line[2]))
comments = []
properties = []
while True:
a += 1
if a >= len(lines):
break
if lines[a][0] == 'comment':
comments.append(lines[a][1])
elif lines[a][0] == 'property':
properties.append(PlyProperty._parse_one(lines[a]))
else:
break
return (PlyElement(name, properties, count, comments),
lines[a:])
@staticmethod
def describe(data, name, len_types={}, val_types={},
comments=[]):
'''
Construct a PlyElement from an array's metadata.
len_types and val_types can be given as mappings from list
property names to type strings (like 'u1', 'f4', etc., or
'int8', 'float32', etc.). These can be used to define the length
and value types of list properties. List property lengths
always default to type 'u1' (8-bit unsigned integer), and value
types default to 'i4' (32-bit integer).
'''
if not isinstance(data, _np.ndarray):
raise TypeError("only numpy arrays are supported")
if len(data.shape) != 1:
raise ValueError("only one-dimensional arrays are "
"supported")
count = len(data)
properties = []
descr = data.dtype.descr
for t in descr:
if not isinstance(t[1], str):
raise ValueError("nested records not supported")
if not t[0]:
raise ValueError("field with empty name")
if len(t) != 2 or t[1][1] == 'O':
# non-scalar field, which corresponds to a list
# property in PLY.
if t[1][1] == 'O':
if len(t) != 2:
raise ValueError("non-scalar object fields not "
"supported")
len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
if t[1][1] == 'O':
val_type = val_types.get(t[0], 'i4')
val_str = _lookup_type(val_type)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyListProperty(t[0], len_str, val_str)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyProperty(t[0], val_str)
properties.append(prop)
elt = PlyElement(name, properties, count, comments)
elt.data = data
return elt
def _read(self, stream, text, byte_order):
'''
Read the actual data from a PLY file.
'''
if text:
self._read_txt(stream)
else:
if self._have_list:
# There are list properties, so a simple load is
# impossible.
self._read_bin(stream, byte_order)
else:
# There are no list properties, so loading the data is
# much more straightforward.
self._data = _np.fromfile(stream,
self.dtype(byte_order),
self.count)
if len(self._data) < self.count:
k = len(self._data)
del self._data
raise PlyParseError("early end-of-file", self, k)
self._check_sanity()
def _write(self, stream, text, byte_order):
'''
Write the data to a PLY file.
'''
if text:
self._write_txt(stream)
else:
if self._have_list:
# There are list properties, so serialization is
# slightly complicated.
self._write_bin(stream, byte_order)
else:
# no list properties, so serialization is
# straightforward.
self.data.astype(self.dtype(byte_order),
copy=False).tofile(stream)
def _read_txt(self, stream):
'''
Load a PLY element from an ASCII-format PLY file. The element
may contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype())
k = 0
for line in _islice(iter(stream.readline, b''), self.count):
fields = iter(line.strip().split())
for prop in self.properties:
try:
self._data[prop.name][k] = prop._from_fields(fields)
except StopIteration:
raise PlyParseError("early end-of-line",
self, k, prop)
except ValueError:
raise PlyParseError("malformed input",
self, k, prop)
try:
next(fields)
except StopIteration:
pass
else:
raise PlyParseError("expected end-of-line", self, k)
k += 1
if k < self.count:
del self._data
raise PlyParseError("early end-of-file", self, k)
def _write_txt(self, stream):
'''
Save a PLY element to an ASCII-format PLY file. The element may
contain list properties.
'''
for rec in self.data:
fields = []
for prop in self.properties:
fields.extend(prop._to_fields(rec[prop.name]))
_np.savetxt(stream, [fields], '%.18g', newline='\r\n')
def _read_bin(self, stream, byte_order):
'''
Load a PLY element from a binary PLY file. The element may
contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
for k in _range(self.count):
for prop in self.properties:
try:
self._data[prop.name][k] = \
prop._read_bin(stream, byte_order)
except StopIteration:
raise PlyParseError("early end-of-file",
self, k, prop)
def _write_bin(self, stream, byte_order):
'''
Save a PLY element to a binary PLY file. The element may
contain list properties.
'''
for rec in self.data:
for prop in self.properties:
prop._write_bin(rec[prop.name], stream, byte_order)
@property
def header(self):
'''
Format this element's metadata as it would appear in a PLY
header.
'''
lines = ['element %s %d' % (self.name, self.count)]
# Some information is lost here, since all comments are placed
# between the 'element' line and the first property definition.
for c in self.comments:
lines.append('comment ' + c)
lines.extend(list(map(str, self.properties)))
return '\r\n'.join(lines)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __str__(self):
return self.header
def __repr__(self):
return ('PlyElement(%r, %r, count=%d, comments=%r)' %
(self.name, self.properties, self.count,
self.comments))
class PlyProperty(object):
'''
PLY property description. This class is pure metadata; the data
itself is contained in PlyElement instances.
'''
def __init__(self, name, val_dtype):
self._name = str(name)
self._check_name()
self.val_dtype = val_dtype
def _get_val_dtype(self):
return self._val_dtype
def _set_val_dtype(self, val_dtype):
self._val_dtype = _data_types[_lookup_type(val_dtype)]
val_dtype = property(_get_val_dtype, _set_val_dtype)
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "Error: property name %r contains spaces" % self._name
raise RuntimeError(msg)
@staticmethod
def _parse_one(line):
assert line[0] == 'property'
if line[1] == 'list':
if len(line) > 5:
raise PlyParseError("too many fields after "
"'property list'")
if len(line) < 5:
raise PlyParseError("too few fields after "
"'property list'")
return PlyListProperty(line[4], line[2], line[3])
else:
if len(line) > 3:
raise PlyParseError("too many fields after "
"'property'")
if len(line) < 3:
raise PlyParseError("too few fields after "
"'property'")
return PlyProperty(line[2], line[1])
def dtype(self, byte_order='='):
'''
Return the numpy dtype description for this property (as a tuple
of strings).
'''
return byte_order + self.val_dtype
def _from_fields(self, fields):
'''
Parse from generator. Raise StopIteration if the property could
not be read.
'''
return _np.dtype(self.dtype()).type(next(fields))
def _to_fields(self, data):
'''
Return generator over one item.
'''
yield _np.dtype(self.dtype()).type(data)
def _read_bin(self, stream, byte_order):
'''
Read data from a binary stream. Raise StopIteration if the
property could not be read.
'''
try:
return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
except IndexError:
raise StopIteration
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
_np.dtype(self.dtype(byte_order)).type(data).tofile(stream)
def __str__(self):
val_str = _data_type_reverse[self.val_dtype]
return 'property %s %s' % (val_str, self.name)
def __repr__(self):
return 'PlyProperty(%r, %r)' % (self.name,
_lookup_type(self.val_dtype))
class PlyListProperty(PlyProperty):
'''
PLY list property description.
'''
def __init__(self, name, len_dtype, val_dtype):
PlyProperty.__init__(self, name, val_dtype)
self.len_dtype = len_dtype
def _get_len_dtype(self):
return self._len_dtype
def _set_len_dtype(self, len_dtype):
self._len_dtype = _data_types[_lookup_type(len_dtype)]
len_dtype = property(_get_len_dtype, _set_len_dtype)
def dtype(self, byte_order='='):
'''
List properties always have a numpy dtype of "object".
'''
return '|O'
def list_dtype(self, byte_order='='):
'''
Return the pair (len_dtype, val_dtype) (both numpy-friendly
strings).
'''
return (byte_order + self.len_dtype,
byte_order + self.val_dtype)
def _from_fields(self, fields):
(len_t, val_t) = self.list_dtype()
n = int(_np.dtype(len_t).type(next(fields)))
data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
if len(data) < n:
raise StopIteration
return data
def _to_fields(self, data):
'''
Return generator over the (numerical) PLY representation of the
list data (length followed by actual data).
'''
(len_t, val_t) = self.list_dtype()
data = _np.asarray(data, dtype=val_t).ravel()
yield _np.dtype(len_t).type(data.size)
for x in data:
yield x
def _read_bin(self, stream, byte_order):
(len_t, val_t) = self.list_dtype(byte_order)
try:
n = _np.fromfile(stream, len_t, 1)[0]
except IndexError:
raise StopIteration
data = _np.fromfile(stream, val_t, n)
if len(data) < n:
raise StopIteration
return data
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
(len_t, val_t) = self.list_dtype(byte_order)
data = _np.asarray(data, dtype=val_t).ravel()
_np.array(data.size, dtype=len_t).tofile(stream)
data.tofile(stream)
def __str__(self):
len_str = _data_type_reverse[self.len_dtype]
val_str = _data_type_reverse[self.val_dtype]
return 'property list %s %s %s' % (len_str, val_str, self.name)
def __repr__(self):
return ('PlyListProperty(%r, %r, %r)' %
(self.name,
_lookup_type(self.len_dtype),
_lookup_type(self.val_dtype)))
| ContrastiveSceneContexts-main | pretrain/scannet_pair/plyfile.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import numpy as np
import math
import glob, os
import argparse
import open3d as o3d
def make_open3d_point_cloud(xyz, color=None, voxel_size=None):
if np.isnan(xyz).any():
return None
xyz = xyz[:,:3]
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if color is not None:
pcd.colors = o3d.utility.Vector3dVector(color)
if voxel_size is not None:
pcd = pcd.voxel_down_sample(voxel_size)
return pcd
def compute_overlap_ratio(pcd0, pcd1, voxel_size):
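# Symmetric overlap ratio between two point clouds: the fraction of (voxel-downsampled)
# points in one cloud that have a neighbor within 1.5 * voxel_size in the other,
# taking the max of both directions. Note the script below inlines the same matching
# logic and does not call this helper.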
pcd0_down = pcd0.voxel_down_sample(voxel_size)
pcd1_down = pcd1.voxel_down_sample(voxel_size)
# get_matching_indices queries a KD-tree, so build one over the other cloud first
matching01 = get_matching_indices(pcd0_down, o3d.geometry.KDTreeFlann(pcd1_down), voxel_size * 1.5, 1)
matching10 = get_matching_indices(pcd1_down, o3d.geometry.KDTreeFlann(pcd0_down), voxel_size * 1.5, 1)
overlap0 = float(len(matching01)) / float(len(pcd0_down.points))
overlap1 = float(len(matching10)) / float(len(pcd1_down.points))
return max(overlap0, overlap1)
def get_matching_indices(source, pcd_tree, search_voxel_size, K=None):
match_inds = []
for i, point in enumerate(source.points):
[_, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
match_inds.append((i, j))
return match_inds
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--input_path', required=True, help='path to sens file to read')
parser.add_argument('--voxel_size', type=float, default=0.05)
opt = parser.parse_args()
print(opt)
print('load point clouds and downsampling...')
_points = [
(pcd_name, make_open3d_point_cloud(np.load(pcd_name)['pcd'], voxel_size=opt.voxel_size))
for pcd_name in glob.glob(os.path.join(opt.input_path, "*.npz"))
]
points = [(pcd_name, pcd) for (pcd_name, pcd) in _points if pcd is not None]
print('loaded {} point clouds ({} invalid were filtered out), computing matching/overlapping'.format(
len(points), len(_points) - len(points)))
matching_matrix = np.zeros((len(points), len(points)))
for i, (pcd0_name, pcd0) in enumerate(points):
print('matching to...{}'.format(pcd0_name))
pcd0_tree = o3d.geometry.KDTreeFlann(copy.deepcopy(pcd0))
for j, (pcd1_name, pcd1) in enumerate(points):
if i == j:
continue
matching_matrix[i, j] = float(len(get_matching_indices(pcd1, pcd0_tree, 1.5 * opt.voxel_size, 1))) / float(len(pcd1.points))
# write to file
print('writing to file')
with open(os.path.join(opt.input_path, "overlap.txt"), 'w') as f:
for i, (pcd0_name, pcd0) in enumerate(points):
for j, (pcd1_name, pcd1) in enumerate(points):
if i < j:
overlap = max(matching_matrix[i, j], matching_matrix[j, i])
f.write("{} {} {}\n".format(pcd0_name, pcd1_name, overlap))
print('done.')
| ContrastiveSceneContexts-main | pretrain/scannet_pair/compute_full_overlapping.py |
import argparse
import os, sys
from SensorData import SensorData
# params
parser = argparse.ArgumentParser()
# data paths
parser.add_argument('--filename', required=True, help='path to sens file to read')
parser.add_argument('--output_path', required=True, help='path to output folder')
parser.add_argument('--export_depth_images', dest='export_depth_images', action='store_true')
parser.add_argument('--export_color_images', dest='export_color_images', action='store_true')
parser.add_argument('--export_poses', dest='export_poses', action='store_true')
parser.add_argument('--export_intrinsics', dest='export_intrinsics', action='store_true')
parser.add_argument('--frame_skip', type=int, default=25)
parser.set_defaults(export_depth_images=True, export_color_images=True, export_poses=True, export_intrinsics=True)
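# Note: set_defaults(...) turns every export on by default, so the store_true flags
# above cannot be used to switch an individual export off from the command line.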
opt = parser.parse_args()
print(opt)
def main():
if not os.path.exists(opt.output_path):
os.makedirs(opt.output_path)
# load the data
print('loading %s...' % opt.filename)
sd = SensorData(opt.filename)
print('loaded!\n')
if opt.export_depth_images:
sd.export_depth_images(os.path.join(opt.output_path, 'depth'), frame_skip=opt.frame_skip)
if opt.export_color_images:
sd.export_color_images(os.path.join(opt.output_path, 'color'), frame_skip=opt.frame_skip)
if opt.export_poses:
sd.export_poses(os.path.join(opt.output_path, 'pose'), frame_skip=opt.frame_skip)
if opt.export_intrinsics:
sd.export_intrinsics(os.path.join(opt.output_path, 'intrinsic'))
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | pretrain/scannet_pair/reader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
import json
import logging
import torch
from omegaconf import OmegaConf
from easydict import EasyDict as edict
import lib.multiprocessing_utils as mpu
import hydra
from lib.ddp_trainer import PointNCELossTrainer, PartitionPointNCELossTrainer, PartitionPointNCELossTrainerPointNet
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format='%(asctime)s %(message)s', datefmt='%m/%d %H:%M:%S', handlers=[ch])
torch.manual_seed(0)
torch.cuda.manual_seed(0)
logging.basicConfig(level=logging.INFO, format="")
def get_trainer(trainer):
if trainer == 'PointNCELossTrainer':
return PointNCELossTrainer
elif trainer == 'PartitionPointNCELossTrainer':
return PartitionPointNCELossTrainer
elif trainer == 'PartitionPointNCELossTrainerPointNet':
return PartitionPointNCELossTrainerPointNet
else:
raise ValueError(f'Trainer {trainer} not found')
@hydra.main(config_path='config', config_name='defaults.yaml')
def main(config):
if os.path.exists('config.yaml'):
logging.info('===> Loading existing config file')
config = OmegaConf.load('config.yaml')
logging.info('===> Loaded existing config file')
logging.info('===> Configurations')
logging.info(config.pretty())
# With more than one GPU, spawn one worker process per GPU; otherwise run single-process.
if config.misc.num_gpus > 1:
mpu.multi_proc_run(config.misc.num_gpus,
fun=single_proc_run, fun_args=(config,))
else:
single_proc_run(config)
def single_proc_run(config):
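# Runs inside each worker process: the global batch size and the number of data-loader
# threads are split evenly across the available GPUs.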
from lib.ddp_data_loaders import make_data_loader
train_loader = make_data_loader(
config,
int(config.trainer.batch_size / config.misc.num_gpus),
num_threads=int(config.misc.train_num_thread / config.misc.num_gpus))
Trainer = get_trainer(config.trainer.trainer)
trainer = Trainer(config=config, data_loader=train_loader)
if config.misc.is_train:
trainer.train()
else:
trainer.test()
if __name__ == "__main__":
os.environ['MKL_THREADING_LAYER'] = 'GNU'
main()
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/ddp_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
from model.pointnet2.pointnet2_modules import PointnetSAModuleVotes, PointnetFPModule
from model.pointnet2.pointnet2_utils import furthest_point_sample
import MinkowskiEngine as ME
class PointNet2Backbone(nn.Module):
r"""
Backbone network for point cloud feature learning.
Based on Pointnet++ single-scale grouping network.
Parameters
----------
input_feature_dim: int
Number of input channels in the feature descriptor for each point.
e.g. 3 for RGB.
"""
def __init__(self, num_feats, n_out, config, D):
super().__init__()
input_feature_dim= 0
self.config = config
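# Four set-abstraction (downsampling) layers: 2048 -> 1024 -> 512 -> 256 points,
# with the grouping radius growing from 0.2 to 1.2 at each stage.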
self.sa1 = PointnetSAModuleVotes(
npoint=2048,
radius=0.2,
nsample=64,
mlp=[input_feature_dim, 64, 64, 128],
use_xyz=True,
normalize_xyz=True
)
self.sa2 = PointnetSAModuleVotes(
npoint=1024,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa3 = PointnetSAModuleVotes(
npoint=512,
radius=0.8,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
self.sa4 = PointnetSAModuleVotes(
npoint=256,
radius=1.2,
nsample=16,
mlp=[256, 128, 128, 256],
use_xyz=True,
normalize_xyz=True
)
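# Four feature-propagation (upsampling) layers mirror the SA layers and bring the
# features back to the full input resolution, ending with 32 channels per point.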
self.fp1 = PointnetFPModule(mlp=[256+256,256,256])
self.fp2 = PointnetFPModule(mlp=[256+256,256,256])
self.fp3 = PointnetFPModule(mlp=[256+128,256,128])
self.fp4 = PointnetFPModule(mlp=[128,128,32])
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor, end_points=None):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_feature_dim) tensor
Point cloud to run predictions on
Each point in the point cloud MUST
be formatted as (x, y, z, features...)
Returns
----------
end_points: {XXX_xyz, XXX_features, XXX_inds}
XXX_xyz: float32 Tensor of shape (B,K,3)
XXX_features: float32 Tensor of shape (B,K,D)
XXX-inds: int64 Tensor of shape (B,K) values in [0,N-1]
"""
if not end_points: end_points = {}
xyz0, features0 = self._break_up_pc(pointcloud)
# --------- 4 SET ABSTRACTION LAYERS ---------
xyz, features, fps_inds = self.sa1(xyz0, features0)
end_points['sa1_inds'] = fps_inds
end_points['sa1_xyz'] = xyz
end_points['sa1_features'] = features
xyz, features, fps_inds = self.sa2(xyz, features) # this fps_inds is just 0,1,...,1023
end_points['sa2_inds'] = fps_inds
end_points['sa2_xyz'] = xyz
end_points['sa2_features'] = features
xyz, features, fps_inds = self.sa3(xyz, features) # this fps_inds is just 0,1,...,511
end_points['sa3_xyz'] = xyz
end_points['sa3_features'] = features
xyz, features, fps_inds = self.sa4(xyz, features) # this fps_inds is just 0,1,...,255
end_points['sa4_xyz'] = xyz
end_points['sa4_features'] = features
# --------- 4 FEATURE UPSAMPLING LAYERS --------
features = self.fp1(end_points['sa3_xyz'], end_points['sa4_xyz'], end_points['sa3_features'], end_points['sa4_features'])
features = self.fp2(end_points['sa2_xyz'], end_points['sa3_xyz'], end_points['sa2_features'], features)
features = self.fp3(end_points['sa1_xyz'], end_points['sa2_xyz'], end_points['sa1_features'], features)
features = self.fp4(xyz0 , end_points['sa1_xyz'], features0, features)
return features
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/pointnet2backbone.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import model.res16unet as res16unet
import model.pointnet2backbone as pointnet2
MODELS = []
def add_models(module):
MODELS.extend([getattr(module, a) for a in dir(module) if 'Net' in a])
add_models(res16unet)
add_models(pointnet2)
def get_models():
'''Returns the list of registered model classes.'''
return MODELS
def load_model(name):
'''Returns the model class (not an instance) given its class name.
'''
all_models = get_models()
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
print('Invalid model index. Options are:')
for model in all_models:
print('\t* {}'.format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from model.resnet import ResNetBase, get_norm
from model.modules.common import ConvType, NormType, conv, conv_tr
from model.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU, MinkowskiGlobalPooling
from MinkowskiEngine import SparseTensor
import MinkowskiEngine.MinkowskiOps as me
import torch
import torch.nn as nn
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
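# PLANES / LAYERS give the channel widths and residual-block counts of the four
# encoder and four decoder stages of this sparse U-Net; OUT_PIXEL_DIST = 1 means the
# output features are produced at the input voxel resolution.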
def __init__(self,
in_channels,
out_channels,
config,
D=3):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
self.normalize_feature = config.net.normalize_feature
def network_initialization(self, in_channels, out_channels, config, D):
dilations = self.DILATIONS
bn_momentum = config.opt.bn_momentum
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.net.conv1_kernel_size, 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
encoder_out = self.block4(out)
out = self.convtr4p16s2(encoder_out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
contrastive = self.final(out)
if self.normalize_feature:
contrastive = SparseTensor(
contrastive.F / torch.norm(contrastive.F, p=2, dim=1, keepdim=True),
coords_key=contrastive.coords_key,
coords_manager=contrastive.coords_man)
return contrastive
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
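# Usage sketch (comment only, not part of the original file): the subclasses
# above only override BLOCK / LAYERS / PLANES; everything else comes from the
# shared Res16UNetBase constructor, which reads config fields such as
# config.net.conv1_kernel_size. Hypothetical instantiation:
#   net = Res16UNet34C(in_channels=3, out_channels=32, config=cfg, D=3)
#   out = net(sinput)  # sinput: a MinkowskiEngine SparseTensor of colors/coords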
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/res16unet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiNetwork
from model.modules.common import ConvType, NormType, get_norm, conv, sum_pool
from model.modules.resnet_block import BasicBlock, Bottleneck
class Model(MinkowskiNetwork):
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
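    # e.g. space_n_time_m(2, 1) -> 2 when D == 3, and [2, 2, 2, 1] when D == 4
    # (spatial extent 2 along x/y/z plus a temporal extent of 1).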
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.opt.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D)
self.bn1 = get_norm(NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1))
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1))
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1))
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1))
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion, out_channels, kernel_size=1, bias=True, D=D)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D),
get_norm(norm_type, planes * block.expansion, D=self.D, bn_momentum=bn_momentum),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
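# Usage sketch (comment only, not part of the original file): ResNetBase cannot
# be instantiated directly (BLOCK is None); a subclass has to provide BLOCK and
# LAYERS, e.g. (hypothetical):
#   class ResNet14(ResNetBase):
#     BLOCK = BasicBlock
#     LAYERS = (1, 1, 1, 1)
#   net = ResNet14(in_channels=3, out_channels=20, config=cfg, D=3)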
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/resnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch '''
import torch
import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact
)
)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name=""
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name
)
class Conv3d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv3d,
batch_norm=BatchNorm3d,
bias=bias,
preact=preact,
name=name
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
nn.init.constant_(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
def set_bn_momentum_default(bn_momentum):
def fn(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = bn_momentum
return fn
class BNMomentumScheduler(object):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
if not isinstance(model, nn.Module):
raise RuntimeError(
"Class '{}' is not a PyTorch nn Module".format(
type(model).__name__
)
)
self.model = model
self.setter = setter
self.lmbd = bn_lambda
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
        self.model.apply(self.setter(self.lmbd(epoch)))
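if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file). The shapes and the
    # momentum schedule below are illustrative assumptions, not repo settings.
    mlp = SharedMLP([3, 64, 128], bn=True)
    x = torch.randn(2, 3, 16, 32)  # (B, C, npoint, nsample)
    print(mlp(x).shape)  # torch.Size([2, 128, 16, 32])
    scheduler = BNMomentumScheduler(
        mlp, bn_lambda=lambda epoch: max(0.01, 0.1 * 0.5 ** (epoch // 10)))
    scheduler.step(5)  # applies the epoch-5 momentum to every BatchNorm layer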
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/pointnet2/pytorch_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import os
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
this_dir = os.path.dirname(os.path.abspath(__file__))
_ext_src_root = "_ext_src"
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
setup(
name='pointnet2',
ext_modules=[
CUDAExtension(
name='pointnet2._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O3"],
"nvcc": ["-O3", "-Xfatbin", "-compress-all"],
},
include_dirs=[os.path.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={
'build_ext': BuildExtension
}
)
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/pointnet2/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
        return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
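# Usage sketch (comment only; requires the compiled pointnet2._ext extension
# and CUDA tensors):
#   xyz = torch.rand(8, 4096, 3).cuda()                                # (B, N, 3)
#   idx = furthest_point_sample(xyz, 1024)                             # (B, 1024) indices
#   centers = gather_operation(xyz.transpose(1, 2).contiguous(), idx)  # (B, 3, 1024)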
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
        unknown : torch.Tensor
            (B, n, 3) tensor of unknown features
        known : torch.Tensor
            (B, m, 3) tensor of known features
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Performs weight linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
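# Usage sketch (comment only; requires the compiled pointnet2._ext extension
# and CUDA tensors):
#   idx = ball_query(0.2, 64, xyz, new_xyz)     # (B, npoint, 64) neighbour indices
#   grouped = grouping_operation(feats, idx)    # (B, C, npoint, 64) grouped features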
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
# type: (GroupAll, bool) -> None
super(GroupAll, self).__init__()
        self.use_xyz = use_xyz
        self.ret_grouped_xyz = ret_grouped_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/pointnet2/pointnet2_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Testing customized ops. '''
import torch
from torch.autograd import gradcheck
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
def test_interpolation_grad():
batch_size = 1
feat_dim = 2
m = 4
feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
def interpolate_func(inputs):
idx = torch.from_numpy(np.array([[[0,1,2],[1,2,3]]])).int().cuda()
weight = torch.from_numpy(np.array([[[1,1,1],[2,2,2]]])).float().cuda()
interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
return interpolated_feats
assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
if __name__=='__main__':
test_interpolation_grad()
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/pointnet2/pointnet2_test.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Pointnet2 layers.
Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
Extended with the following:
1. Uniform sampling in each local region (sample_uniformly)
2. Return sampled points indices to support votenet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, N, C) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
npoint: int,
radii: List[float],
nsamples: List[int],
mlps: List[List[int]],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True
):
super().__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz
)
class PointnetSAModuleVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True,
pooling: str = 'max',
sigma: float = None, # for RBF pooling
        normalize_xyz: bool = False, # normalize local XYZ with radius
sample_uniformly: bool = False,
ret_unique_cnt: bool = False
):
super().__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.pooling = pooling
self.mlp_module = None
self.use_xyz = use_xyz
self.sigma = sigma
if self.sigma is None:
self.sigma = self.radius/2
self.normalize_xyz = normalize_xyz
self.ret_unique_cnt = ret_unique_cnt
if npoint is not None:
self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz,
sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
else:
self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
mlp_spec = mlp
if use_xyz and len(mlp_spec)>0:
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None,
inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
else:
assert(inds.shape[1] == self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
if not self.ret_unique_cnt:
grouped_features, grouped_xyz = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample)
else:
grouped_features, grouped_xyz, unique_cnt = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
new_features = self.mlp_module(
grouped_features
) # (B, mlp[-1], npoint, nsample)
if self.pooling == 'max':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'avg':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'rbf':
# Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
# Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1,keepdim=False) / (self.sigma**2) / 2) # (B, npoint, nsample)
new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample) # (B, mlp[-1], npoint, 1)
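            # rbf has shape (B, npoint, nsample): each neighbour is weighted by
            # exp(-||dx||^2 / (2 * sigma^2)) and the weighted sum is divided by
            # nsample (not by the sum of the weights).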
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
if not self.ret_unique_cnt:
return new_xyz, new_features, inds
else:
return new_xyz, new_features, inds, unique_cnt
class PointnetSAModuleMSGVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlps: List[List[int]],
npoint: int,
radii: List[float],
nsamples: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None, inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
inds : torch.Tensor
(B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, *, mlp: List[int], bn: bool = True):
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
new_features : torch.Tensor
(B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
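            # Inverse-distance weights over the 3 nearest neighbours; the weights
            # for each target point sum to 1.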
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*known_feats.size()[0:2], unknown.size(1)
)
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats],
dim=1) #(B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
class PointnetLFPModuleMSG(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
learnable feature propagation layer.'''
def __init__(
self,
*,
mlps: List[List[int]],
radii: List[float],
nsamples: List[int],
post_mlp: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
sample_uniformly=sample_uniformly)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
r""" Propagate features from xyz1 to xyz2.
Parameters
----------
xyz2 : torch.Tensor
(B, N2, 3) tensor of the xyz coordinates of the features
xyz1 : torch.Tensor
(B, N1, 3) tensor of the xyz coordinates of the features
features2 : torch.Tensor
            (B, C2, N2) tensor of the descriptors of the features
        features1 : torch.Tensor
            (B, C1, N1) tensor of the descriptors of the features
Returns
-------
new_features1 : torch.Tensor
(B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz1, xyz2, features1
) # (B, C1, N2, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], N2, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], N2, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], N2)
if features2 is not None:
new_features = torch.cat([new_features, features2],
dim=1) #(B, mlp[-1] + C2, N2)
new_features = new_features.unsqueeze(-1)
new_features = self.post_mlp(new_features)
new_features_list.append(new_features)
return torch.cat(new_features_list, dim=1).squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(
torch.cuda.FloatTensor(*new_features.size()).fill_(1)
)
print(new_features)
print(xyz.grad)
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/pointnet2/pointnet2_modules.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from model.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/modules/resnet_block.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree. | ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc
from enum import Enum
import MinkowskiEngine as ME
class NormType(Enum):
BATCH_NORM = 0
SPARSE_LAYER_NORM = 1
SPARSE_INSTANCE_NORM = 2
SPARSE_SWITCH_NORM = 3
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
if norm_type == NormType.BATCH_NORM:
return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
elif norm_type == NormType.SPARSE_INSTANCE_NORM:
return ME.MinkowskiInstanceNorm(n_channels, D=D)
else:
raise ValueError(f'Norm type: {norm_type} not supported')
class ConvType(Enum):
"""
Define the kernel region type
"""
HYPERCUBE = 0, 'HYPERCUBE'
SPATIAL_HYPERCUBE = 1, 'SPATIAL_HYPERCUBE'
SPATIO_TEMPORAL_HYPERCUBE = 2, 'SPATIO_TEMPORAL_HYPERCUBE'
HYPERCROSS = 3, 'HYPERCROSS'
SPATIAL_HYPERCROSS = 4, 'SPATIAL_HYPERCROSS'
SPATIO_TEMPORAL_HYPERCROSS = 5, 'SPATIO_TEMPORAL_HYPERCROSS'
  SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS'
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
# Covert the ConvType var to a RegionType var
conv_to_region_type = {
# kernel_size = [k, k, k, 1]
ConvType.HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPERCUBE,
ConvType.HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPERCROSS,
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYBRID
}
int_to_region_type = {m.value: m for m in ME.RegionType}
def convert_region_type(region_type):
"""
Convert the integer region_type to the corresponding RegionType enum object.
"""
return int_to_region_type[region_type]
def convert_conv_type(conv_type, kernel_size, D):
assert isinstance(conv_type, ConvType), "conv_type must be of ConvType"
region_type = conv_to_region_type[conv_type]
axis_types = None
if conv_type == ConvType.SPATIAL_HYPERCUBE:
# No temporal convolution
    if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.HYPERCUBE:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIAL_HYPERCROSS:
    if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.HYPERCROSS:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS:
# Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim
axis_types = [
ME.RegionType.HYPERCUBE,
] * 3
if D == 4:
axis_types.append(ME.RegionType.HYPERCROSS)
return region_type, axis_types, kernel_size
def conv(in_planes,
out_planes,
kernel_size,
stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiConvolution(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def conv_tr(in_planes,
out_planes,
kernel_size,
upsample_stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
upsample_stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D)
return ME.MinkowskiConvolutionTranspose(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=upsample_stride,
dilation=dilation,
has_bias=bias,
kernel_generator=kernel_generator,
dimension=D)
def avg_pool(kernel_size,
stride=1,
dilation=1,
conv_type=ConvType.HYPERCUBE,
in_coords_key=None,
D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def avg_unpool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiAvgUnpooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, 'Dimension must be a positive integer'
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size, stride, dilation, region_type=region_type, axis_types=axis_types, dimension=D)
return ME.MinkowskiSumPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D)
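# Usage sketch (comment only, not part of the original file; argument values
# are illustrative):
#   c3 = conv(3, 32, kernel_size=3, stride=2, conv_type=ConvType.SPATIAL_HYPERCROSS, D=3)
#   up = conv_tr(32, 32, kernel_size=2, upsample_stride=2, D=3)
#   pool = sum_pool(kernel_size=2, stride=2, D=3)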
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/model/modules/common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import random
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, coords, feats):
for transform in self.transforms:
coords, feats = transform(coords, feats)
return coords, feats
class Jitter:
def __init__(self, mu=0, sigma=0.01):
self.mu = mu
self.sigma = sigma
def __call__(self, coords, feats):
if random.random() < 0.95:
feats += np.random.normal(self.mu, self.sigma, (feats.shape[0], feats.shape[1]))
return coords, feats
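if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original file): apply the
    # colour jitter to a toy point cloud; shapes and values are illustrative.
    coords = np.random.rand(100, 3)
    feats = np.random.rand(100, 3) * 255.0
    transform = Compose([Jitter(sigma=0.01)])
    coords, feats = transform(coords, feats)
    print(coords.shape, feats.shape)  # (100, 3) (100, 3)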
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/transforms.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0.0
self.sq_sum = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.sq_sum += val**2 * n
self.var = self.sq_sum / self.count - self.avg ** 2
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.avg = 0.
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
self.avg = 0
def tic(self):
    # using time.time instead of time.clock because time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.avg = self.total_time / self.calls
if average:
return self.avg
else:
return self.diff
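if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original file).
    meter, timer = AverageMeter(), Timer()
    for v in (1.0, 2.0, 3.0):
        timer.tic()
        meter.update(v)
        timer.toc()
    print('avg value: %.2f, avg time: %.6fs' % (meter.avg, timer.avg))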
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/timer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree. | ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/__init__.py |
# Written by Chris Choy <[email protected]>
# Distributed under MIT License
import logging
import random
import torch
import torch.utils.data
import numpy as np
import glob
import os
import copy
from tqdm import tqdm
from scipy.linalg import expm, norm
from lib.io3d import write_triangle_mesh
import lib.transforms as t
import MinkowskiEngine as ME
from torch.utils.data.sampler import RandomSampler
from lib.data_sampler import DistributedInfSampler
import open3d as o3d
def make_open3d_point_cloud(xyz, color=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
if color is not None:
pcd.colors = o3d.utility.Vector3dVector(color)
return pcd
def get_matching_indices(source, target, trans, search_voxel_size, K=None):
source_copy = copy.deepcopy(source)
target_copy = copy.deepcopy(target)
source_copy.transform(trans)
pcd_tree = o3d.geometry.KDTreeFlann(target_copy)
match_inds = []
for i, point in enumerate(source_copy.points):
[_, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
match_inds.append((i, j))
return match_inds
def default_collate_pair_fn(list_data):
xyz0, xyz1, coords0, coords1, feats0, feats1, label0, label1, instance0, instance1, matching_inds, trans, T0 = list(zip(*list_data))
xyz_batch0, coords_batch0, feats_batch0, label_batch0, instance_batch0 = [], [], [], [], []
xyz_batch1, coords_batch1, feats_batch1, label_batch1, instance_batch1 = [], [], [], [], []
matching_inds_batch, trans_batch, len_batch, T0_batch = [], [], [], []
batch_id = 0
curr_start_inds = np.zeros((1, 2))
for batch_id, _ in enumerate(coords0):
N0 = coords0[batch_id].shape[0]
N1 = coords1[batch_id].shape[0]
# Move batchids to the beginning
xyz_batch0.append(torch.from_numpy(xyz0[batch_id]))
coords_batch0.append(
torch.cat((torch.ones(N0, 1).float() * batch_id,
torch.from_numpy(coords0[batch_id]).float()), 1))
feats_batch0.append(torch.from_numpy(feats0[batch_id]))
label_batch0.append(torch.from_numpy(label0[batch_id]))
instance_batch0.append(torch.from_numpy(instance0[batch_id]))
xyz_batch1.append(torch.from_numpy(xyz1[batch_id]))
coords_batch1.append(
torch.cat((torch.ones(N1, 1).float() * batch_id,
torch.from_numpy(coords1[batch_id]).float()), 1))
feats_batch1.append(torch.from_numpy(feats1[batch_id]))
label_batch1.append(torch.from_numpy(label1[batch_id]))
instance_batch1.append(torch.from_numpy(instance1[batch_id]))
trans_batch.append(torch.from_numpy(trans[batch_id]))
T0_batch.append(torch.from_numpy(T0[batch_id]))
# in case 0 matching
if len(matching_inds[batch_id]) == 0:
matching_inds[batch_id].extend([0, 0])
matching_inds_batch.append(
torch.from_numpy(np.array(matching_inds[batch_id]) + curr_start_inds))
len_batch.append([N0, N1])
# Move the head
curr_start_inds[0, 0] += N0
curr_start_inds[0, 1] += N1
# Concatenate all lists
xyz_batch0 = torch.cat(xyz_batch0, 0).float()
coords_batch0 = torch.cat(coords_batch0, 0).float()
feats_batch0 = torch.cat(feats_batch0, 0).float()
label_batch0 = torch.cat(label_batch0, 0).int()
instance_batch0 = torch.cat(instance_batch0, 0).int()
xyz_batch1 = torch.cat(xyz_batch1, 0).float()
coords_batch1 = torch.cat(coords_batch1, 0).float()
feats_batch1 = torch.cat(feats_batch1, 0).float()
label_batch1 = torch.cat(label_batch1, 0).int()
instance_batch1 = torch.cat(instance_batch1, 0).int()
trans_batch = torch.cat(trans_batch, 0).float()
T0_batch = torch.stack(T0_batch, 0).float()
matching_inds_batch = torch.cat(matching_inds_batch, 0).int()
return {
'pcd0': xyz_batch0,
'pcd1': xyz_batch1,
'sinput0_C': coords_batch0,
'sinput0_F': feats_batch0,
'sinput0_L': label_batch0,
      'sinput0_I': instance_batch0,
'sinput1_C': coords_batch1,
'sinput1_F': feats_batch1,
'sinput1_L': label_batch1,
'sinput1_I': instance_batch1,
'correspondences': matching_inds_batch,
'trans': trans_batch,
'T0': T0_batch,
'len_batch': len_batch,
}
# Rotation matrix about a given axis by angle theta (matrix exponential of the skew-symmetric cross-product matrix)
def M(axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
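# e.g. M(np.array([0., 0., 1.]), np.pi / 2) is a 90-degree rotation about the
# z-axis: np.cross(np.eye(3), a) is the skew-symmetric cross-product matrix of
# a, and its matrix exponential is the corresponding Rodrigues rotation.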
def sample_random_trans(pcd, randg, rotation_range=360):
T = np.eye(4)
R = M(randg.rand(3) - 0.5, rotation_range * np.pi / 180.0 * (randg.rand(1) - 0.5))
T[:3, :3] = R
T[:3, 3] = R.dot(-np.mean(pcd, axis=0))
return T
def sample_random_trans_z(pcd):
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 64, np.pi / 64), (-np.pi / 64, np.pi / 64), (-np.pi, np.pi))
rot_mats = []
for axis_ind, rot_bound in enumerate(ROTATION_AUGMENTATION_BOUND):
theta = 0
axis = np.zeros(3)
axis[axis_ind] = 1
if rot_bound is not None:
theta = np.random.uniform(*rot_bound)
rot_mats.append(M(axis, theta))
# Use random order
np.random.shuffle(rot_mats)
rot_mat = rot_mats[0] @ rot_mats[1] @ rot_mats[2]
T = np.eye(4)
T[:3, :3] = rot_mat
T[:3, 3] = rot_mat.dot(-np.mean(pcd, axis=0))
return T
def only_trans(pcd):
T = np.eye(4)
T[:3, 3] = -np.mean(pcd, axis=0)
return T
class PairDataset(torch.utils.data.Dataset):
AUGMENT = None
def __init__(self,
phase,
transform=None,
random_scale=False,
manual_seed=False,
config=None):
self.phase = phase
self.files = []
self.data_objects = []
self.transform = transform
self.voxel_size = config.data.voxel_size
self.matching_search_voxel_size = \
config.data.voxel_size * config.trainer.positive_pair_search_voxel_size_multiplier
self.config = config
self.random_scale = random_scale
self.min_scale = 0.8
self.max_scale = 1.2
self.randg = np.random.RandomState()
if manual_seed:
self.reset_seed()
self.root = '/'
if phase == "train":
self.root_filelist = root = config.data.scannet_match_dir
else:
raise NotImplementedError
logging.info(f"Loading the subset {phase} from {root}")
fname_txt = os.path.join(self.root_filelist, 'splits/overlap30.txt')
with open(fname_txt) as f:
content = f.readlines()
fnames = [x.strip().split() for x in content]
for fname in fnames:
self.files.append([os.path.join(self.root_filelist, fname[0]),
os.path.join(self.root_filelist, fname[1])])
def reset_seed(self, seed=0):
logging.info(f"Resetting the data loader seed to {seed}")
self.randg.seed(seed)
def apply_transform(self, pts, trans):
R = trans[:3, :3]
T = trans[:3, 3]
pts = pts @ R.T + T
return pts
def __len__(self):
return len(self.files)
class ScanNetIndoorPairDataset(PairDataset):
OVERLAP_RATIO = None
AUGMENT = None
def __init__(self,
phase,
transform=None,
random_scale=False,
manual_seed=False,
config=None):
PairDataset.__init__(self, phase, transform, random_scale, manual_seed, config)
# add
self.CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')
self.VALID_CLASS_IDS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39)
NUM_LABELS = 41 # Will be converted to 20 as defined in IGNORE_LABELS.
# 0-40
IGNORE_LABELS = tuple(set(range(41)) - set(self.VALID_CLASS_IDS))
self.label_map = {}
n_used = 0
for l in range(NUM_LABELS):
if l in IGNORE_LABELS:
self.label_map[l] = 255
else:
self.label_map[l] = n_used
n_used += 1
self.label_map[255] = 255
def get_correspondences(self, idx):
file0 = os.path.join(self.root, self.files[idx][0])
file1 = os.path.join(self.root, self.files[idx][1])
data0 = np.load(file0)
data1 = np.load(file1)
xyz0 = data0["pcd"][:,:3]
xyz1 = data1["pcd"][:,:3]
label0 = (data0["pcd"][:,6] / 1000).astype(np.int32)
label1 = (data1["pcd"][:,6] / 1000).astype(np.int32)
instance0 = (data0["pcd"][:,6] % 1000).astype(np.int32)
instance1 = (data1["pcd"][:,6] % 1000).astype(np.int32)
color0 = data0['pcd'][:,3:6]
color1 = data1['pcd'][:,3:6]
matching_search_voxel_size = self.matching_search_voxel_size
if self.random_scale and random.random() < 0.95:
scale = self.min_scale + \
(self.max_scale - self.min_scale) * random.random()
matching_search_voxel_size *= scale
xyz0 = scale * xyz0
xyz1 = scale * xyz1
if self.config.data.random_rotation_xyz:
T0 = sample_random_trans(xyz0, self.randg)
T1 = sample_random_trans(xyz1, self.randg)
else:
T0 = sample_random_trans_z(xyz0)
T1 = sample_random_trans_z(xyz1)
#else:
# T0 = only_trans(xyz0)
# T1 = only_trans(xyz1)
trans = T1 @ np.linalg.inv(T0)
xyz0 = self.apply_transform(xyz0, T0)
xyz1 = self.apply_transform(xyz1, T1)
# Voxelization
sel0 = ME.utils.sparse_quantize(xyz0 / self.voxel_size, return_index=True)
sel1 = ME.utils.sparse_quantize(xyz1 / self.voxel_size, return_index=True)
if not self.config.data.voxelize:
sel0 = sel0[np.random.choice(sel0.shape[0], self.config.data.num_points,
replace=self.config.data.num_points>sel0.shape[0])]
sel1 = sel1[np.random.choice(sel1.shape[0], self.config.data.num_points,
replace=self.config.data.num_points>sel1.shape[0])]
# Make point clouds using voxelized points
pcd0 = make_open3d_point_cloud(xyz0)
pcd1 = make_open3d_point_cloud(xyz1)
# Select features and points using the returned voxelized indices
pcd0.colors = o3d.utility.Vector3dVector(color0[sel0])
pcd1.colors = o3d.utility.Vector3dVector(color1[sel1])
pcd0.points = o3d.utility.Vector3dVector(np.array(pcd0.points)[sel0])
pcd1.points = o3d.utility.Vector3dVector(np.array(pcd1.points)[sel1])
label0 = label0[sel0]
label1 = label1[sel1]
color0 = color0[sel0]
color1 = color1[sel1]
instance0 = instance0[sel0]
instance1 = instance1[sel1]
matches = get_matching_indices(pcd0, pcd1, trans, matching_search_voxel_size)
# Get features
feats_train0, feats_train1 = [], []
feats_train0.append(color0)
feats_train1.append(color1)
feats0 = np.hstack(feats_train0)
feats1 = np.hstack(feats_train1)
# Get coords
xyz0 = np.array(pcd0.points)
xyz1 = np.array(pcd1.points)
if self.config.data.voxelize:
coords0 = np.floor(xyz0 / self.voxel_size)
coords1 = np.floor(xyz1 / self.voxel_size)
else:
coords0 = xyz0
coords1 = xyz1
#jitter color
if self.transform:
coords0, feats0 = self.transform(coords0, feats0)
coords1, feats1 = self.transform(coords1, feats1)
feats0 = feats0 / 255.0 - 0.5
feats1 = feats1 / 255.0 - 0.5
# label mapping for monitor
    label0 = np.array([self.label_map[x] for x in label0], dtype=np.int32)
    label1 = np.array([self.label_map[x] for x in label1], dtype=np.int32)
# NB(s9xie): xyz are coordinates in the original system;
# coords are sparse conv grid coords. (subject to a scaling factor)
# coords0 -> sinput0_C
    # trans is T1 @ T0^-1 (it maps frame-0 world coordinates into frame 1)
return (xyz0, xyz1, coords0, coords1, feats0, feats1, label0, label1, instance0, instance1, matches, trans, T0)
def __getitem__(self, idx):
return self.get_correspondences(idx)
class ScanNetMatchPairDataset(ScanNetIndoorPairDataset):
OVERLAP_RATIO = 0.3
DATA_FILES = {
'train': './config/train_scannet.txt',
}
ALL_DATASETS = [ScanNetMatchPairDataset]
dataset_str_mapping = {d.__name__: d for d in ALL_DATASETS}
def make_data_loader(config, batch_size, num_threads=0):
if config.data.dataset not in dataset_str_mapping.keys():
    logging.error(f'Dataset {config.data.dataset} does not exist in ' +
                  ', '.join(dataset_str_mapping.keys()))
Dataset = dataset_str_mapping[config.data.dataset]
transforms = []
transforms.append(t.Jitter())
dset = Dataset(
phase="train",
transform=t.Compose(transforms),
random_scale=False,
config=config)
collate_pair_fn = default_collate_pair_fn
if config.misc.num_gpus > 1:
sampler = DistributedInfSampler(dset)
else:
sampler = None
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
shuffle=False if sampler else True,
num_workers=num_threads,
collate_fn=collate_pair_fn,
pin_memory=False,
sampler=sampler,
drop_last=True)
return loader
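# A minimal usage sketch (illustrative only; it assumes a hydra/OmegaConf config with
# the fields accessed above, e.g. config.data.dataset='ScanNetMatchPairDataset',
# config.misc.num_gpus and config.data.voxelize). The key names mentioned below are
# the ones produced by default_collate_pair_fn and consumed by the trainers in
# lib/ddp_trainer.py.
def _example_build_loader(config):
  loader = make_data_loader(config, batch_size=4, num_threads=2)
  batch = next(iter(loader))
  # batch carries batched sparse-conv inputs for both views of each scene pair
  # ('sinput0_C'/'sinput0_F' and 'sinput1_C'/'sinput1_F'), the point-to-point
  # 'correspondences', and the per-scene pose 'T0'.
  return batch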
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/ddp_data_loaders.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import os.path as osp
import gc
import logging
import numpy as np
import json
from omegaconf import OmegaConf
import torch.nn as nn
import torch
import torch.optim as optim
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from lib.data_sampler import InfSampler, DistributedInfSampler
from model import load_model
from lib.timer import Timer, AverageMeter
import MinkowskiEngine as ME
import lib.distributed as du
import torch.distributed as dist
from lib.criterion import NCESoftmaxLoss
from torch.serialization import default_restore_location
torch.autograd.set_detect_anomaly(True)
LARGE_NUM = 1e9
def apply_transform(pts, trans):
  # Undo the rigid transform `trans` (rotation R, translation T) on voxelized
  # coordinates: go back to metric space, invert the transform, re-center the cloud,
  # and return to voxel units. The voxel size matches the pretraining data setup.
  voxel_size = 0.025
  R = trans[:3, :3]
  T = trans[:3, 3]
  pts = pts * voxel_size
  pts = torch.matmul(pts - T, torch.inverse(R.T))
  pts = pts - torch.mean(pts, 0)
  pts = pts / voxel_size
  return pts
def _hash(arr, M):
if isinstance(arr, np.ndarray):
N, D = arr.shape
else:
N, D = len(arr[0]), len(arr)
hash_vec = np.zeros(N, dtype=np.int64)
for d in range(D):
if isinstance(arr, np.ndarray):
hash_vec += arr[:, d] * M**d
else:
hash_vec += arr[d] * M**d
return hash_vec
def load_state(model, weights, lenient_weight_loading=False):
if du.get_world_size() > 1:
_model = model.module
else:
_model = model
if lenient_weight_loading:
model_state = _model.state_dict()
filtered_weights = {
k: v for k, v in weights.items() if k in model_state and v.size() == model_state[k].size()
}
logging.info("Load weights:" + ', '.join(filtered_weights.keys()))
weights = model_state
weights.update(filtered_weights)
_model.load_state_dict(weights, strict=True)
def shuffle_loader(data_loader, cur_epoch):
assert isinstance(data_loader.sampler, (RandomSampler, InfSampler, DistributedSampler, DistributedInfSampler))
if isinstance(data_loader.sampler, DistributedSampler):
data_loader.sampler.set_epoch(cur_epoch)
class ContrastiveLossTrainer:
def __init__(
self,
config,
data_loader):
assert config.misc.use_gpu and torch.cuda.is_available(), "DDP mode must support GPU"
    num_feats = 3  # RGB input features
self.is_master = du.is_master_proc(config.misc.num_gpus) if config.misc.num_gpus > 1 else True
# Model initialization
self.cur_device = torch.cuda.current_device()
Model = load_model(config.net.model)
model = Model(
num_feats,
config.net.model_n_out,
config,
D=3)
model = model.cuda(device=self.cur_device)
if config.misc.num_gpus > 1:
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[self.cur_device],
output_device=self.cur_device,
broadcast_buffers=False,
)
self.config = config
self.model = model
self.optimizer = getattr(optim, config.opt.optimizer)(
model.parameters(),
lr=config.opt.lr,
momentum=config.opt.momentum,
weight_decay=config.opt.weight_decay)
self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, config.opt.exp_gamma)
self.curr_iter = 0
self.batch_size = data_loader.batch_size
self.data_loader = data_loader
self.neg_thresh = config.trainer.neg_thresh
self.pos_thresh = config.trainer.pos_thresh
#---------------- optional: resume checkpoint by given path ----------------------
if config.net.weight:
if self.is_master:
logging.info('===> Loading weights: ' + config.net.weight)
state = torch.load(config.net.weight, map_location=lambda s, l: default_restore_location(s, 'cpu'))
load_state(model, state['state_dict'], config.misc.lenient_weight_loading)
if self.is_master:
logging.info('===> Loaded weights: ' + config.net.weight)
#---------------- default: resume checkpoint in current folder ----------------------
checkpoint_fn = 'weights/weights.pth'
if osp.isfile(checkpoint_fn):
if self.is_master:
logging.info("=> loading checkpoint '{}'".format(checkpoint_fn))
state = torch.load(checkpoint_fn, map_location=lambda s, l: default_restore_location(s, 'cpu'))
self.curr_iter = state['curr_iter']
load_state(model, state['state_dict'])
self.optimizer.load_state_dict(state['optimizer'])
self.scheduler.load_state_dict(state['scheduler'])
if self.is_master:
logging.info("=> loaded checkpoint '{}' (curr_iter {})".format(checkpoint_fn, state['curr_iter']))
else:
logging.info("=> no checkpoint found at '{}'".format(checkpoint_fn))
if self.is_master:
self.writer = SummaryWriter(logdir='logs')
if not os.path.exists('weights'):
os.makedirs('weights', mode=0o755)
OmegaConf.save(config, 'config.yaml')
    # shape-context partitioner used by the partition-based trainers below
    from lib.shape_context import ShapeContext
self.partitioner = ShapeContext(r1=config.shape_context.r1,
r2=config.shape_context.r2,
nbins_xy=config.shape_context.nbins_xy,
nbins_zy=config.shape_context.nbins_zy)
def pdist(self, A, B):
D2 = torch.sum((A.unsqueeze(1) - B.unsqueeze(0)).pow(2), 2)
return torch.sqrt(D2 + 1e-7)
def _save_checkpoint(self, curr_iter, filename='checkpoint'):
if not self.is_master:
return
_model = self.model.module if du.get_world_size() > 1 else self.model
state = {
'curr_iter': curr_iter,
'state_dict': _model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
}
filepath = os.path.join('weights', f'{filename}.pth')
logging.info("Saving checkpoint: {} ...".format(filepath))
torch.save(state, filepath)
# Delete symlink if it exists
if os.path.exists('weights/weights.pth'):
os.remove('weights/weights.pth')
# Create symlink
os.system('ln -s {}.pth weights/weights.pth'.format(filename))
class PointNCELossTrainer(ContrastiveLossTrainer):
def __init__(
self,
config,
data_loader):
ContrastiveLossTrainer.__init__(self, config, data_loader)
self.T = config.misc.nceT
self.npos = config.misc.npos
self.stat_freq = config.trainer.stat_freq
self.lr_update_freq = config.trainer.lr_update_freq
self.checkpoint_freq = config.trainer.checkpoint_freq
  def compute_loss(self, q, k, mask=None):
    # PointInfoNCE: row i of the logits has its positive at column i and treats every
    # other column as a negative; entries where `mask` is True are pushed down by
    # LARGE_NUM so they never act as negatives.
    npos = q.shape[0]
    logits = torch.mm(q, k.transpose(1, 0)) # npos by npos
    labels = torch.arange(npos).cuda().long()
    out = torch.div(logits, self.T)
    out = out.squeeze().contiguous()
    if mask is not None:
      out = out - LARGE_NUM * mask.float()
    criterion = NCESoftmaxLoss().cuda()
    loss = criterion(out, labels)
    return loss
def train(self):
curr_iter = self.curr_iter
data_loader_iter = self.data_loader.__iter__()
data_meter, data_timer, total_timer = AverageMeter(), Timer(), Timer()
while (curr_iter < self.config.opt.max_iter):
curr_iter += 1
epoch = curr_iter / len(self.data_loader)
batch_loss = self._train_iter(data_loader_iter, [data_meter, data_timer, total_timer])
# update learning rate
if curr_iter % self.lr_update_freq == 0 or curr_iter == 1:
lr = self.scheduler.get_last_lr()
self.scheduler.step()
# Print logs
if curr_iter % self.stat_freq == 0 and self.is_master:
self.writer.add_scalar('train/loss', batch_loss['loss'], curr_iter)
logging.info(
"Train Epoch: {:.3f} [{}/{}], Current Loss: {:.3e}"
.format(epoch, curr_iter,
len(self.data_loader), batch_loss['loss']) +
"\tData time: {:.4f}, Train time: {:.4f}, Iter time: {:.4f}, LR: {}".format(
data_meter.avg, total_timer.avg - data_meter.avg, total_timer.avg, self.scheduler.get_last_lr()))
data_meter.reset()
total_timer.reset()
# save checkpoint
if self.is_master and curr_iter % self.checkpoint_freq == 0:
lr = self.scheduler.get_last_lr()
logging.info(f" Epoch: {epoch}, LR: {lr}")
checkpoint_name = 'checkpoint'
if not self.config.trainer.overwrite_checkpoint:
checkpoint_name += '_{}'.format(curr_iter)
self._save_checkpoint(curr_iter, checkpoint_name)
def _train_iter(self, data_loader_iter, timers):
data_meter, data_timer, total_timer = timers
self.optimizer.zero_grad()
batch_loss = {
'loss': 0.0,
}
data_time = 0
total_timer.tic()
data_timer.tic()
    input_dict = next(data_loader_iter)
data_time += data_timer.toc(average=False)
sinput0 = ME.SparseTensor(
input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.cur_device)
F0 = self.model(sinput0)
F0 = F0.F
sinput1 = ME.SparseTensor(
input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.cur_device)
F1 = self.model(sinput1)
F1 = F1.F
N0, N1 = input_dict['pcd0'].shape[0], input_dict['pcd1'].shape[0]
pos_pairs = input_dict['correspondences'].to(self.cur_device)
q_unique, count = pos_pairs[:, 0].unique(return_counts=True)
uniform = torch.distributions.Uniform(0, 1).sample([len(count)]).to(self.cur_device)
off = torch.floor(uniform*count).long()
cums = torch.cat([torch.tensor([0], device=self.cur_device), torch.cumsum(count, dim=0)[0:-1]], dim=0)
k_sel = pos_pairs[:, 1][off+cums]
if self.npos < q_unique.shape[0]:
sampled_inds = np.random.choice(q_unique.shape[0], self.npos, replace=False)
q_unique = q_unique[sampled_inds]
k_sel = k_sel[sampled_inds]
q = F0[q_unique.long()]
k = F1[k_sel.long()]
loss = self.compute_loss(q,k)
loss.backward()
result = {"loss": loss}
if self.config.misc.num_gpus > 1:
result = du.scaled_all_reduce_dict(result, self.config.misc.num_gpus)
batch_loss['loss'] += result["loss"].item()
self.optimizer.step()
torch.cuda.empty_cache()
total_timer.toc()
data_meter.update(data_time)
return batch_loss
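# Illustrative sketch (not used by the pipeline): the partition-based trainers below
# call compute_loss() with a boolean mask so that, for each anchor point, only the
# negatives falling inside one spatial partition stay active. Shapes and the
# temperature value here are made up.
def _example_masked_point_nce(q, k, partition, partition_id, T=0.4):
  logits = torch.mm(q, k.t()) / T                    # [npos, npos]; diagonal = positives
  keep = (partition == partition_id)                 # negatives inside this partition
  keep.fill_diagonal_(True)                          # never mask out the positive pair
  logits = logits - LARGE_NUM * (~keep).float()      # suppress everything else
  labels = torch.arange(q.shape[0], device=q.device)
  return F.cross_entropy(logits, labels)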
class PartitionPointNCELossTrainer(PointNCELossTrainer):
def _train_iter(self, data_loader_iter, timers):
# optimizer and loss
self.optimizer.zero_grad()
batch_loss = {
'loss': 0.0,
}
loss = 0
# timing
data_meter, data_timer, total_timer = timers
data_time = 0
total_timer.tic()
data_timer.tic()
    input_dict = next(data_loader_iter)
data_time += data_timer.toc(average=False)
# network forwarding
sinput0 = ME.SparseTensor(
input_dict['sinput0_F'], coords=input_dict['sinput0_C']).to(self.cur_device)
F0 = self.model(sinput0)
F0 = F0.F
sinput1 = ME.SparseTensor(
input_dict['sinput1_F'], coords=input_dict['sinput1_C']).to(self.cur_device)
F1 = self.model(sinput1)
F1 = F1.F
# get positive pairs
pos_pairs = input_dict['correspondences'].to(self.cur_device)
q_unique, count = pos_pairs[:, 0].unique(return_counts=True)
uniform = torch.distributions.Uniform(0, 1).sample([len(count)]).to(self.cur_device)
off = torch.floor(uniform*count).long()
cums = torch.cat([torch.tensor([0], device=self.cur_device), torch.cumsum(count, dim=0)[0:-1]], dim=0)
k_sel = pos_pairs[:, 1][off+cums]
# iterate batch
source_batch_ids = input_dict['sinput0_C'][q_unique.long()][:,0].float().cuda()
for batch_id in range(self.batch_size):
# batch mask
mask = (source_batch_ids == batch_id)
q_unique_batch = q_unique[mask]
k_sel_batch = k_sel[mask]
# sampling points in current scene
if self.npos < q_unique_batch.shape[0]:
sampled_inds = np.random.choice(q_unique_batch.shape[0], self.npos, replace=False)
q_unique_batch = q_unique_batch[sampled_inds]
k_sel_batch = k_sel_batch[sampled_inds]
q = F0[q_unique_batch.long()]
k = F1[k_sel_batch.long()]
npos = q.shape[0]
if npos == 0:
logging.info('partitionTrainer: no points in this batch')
continue
source_xyz = input_dict['sinput0_C'][q_unique_batch.long()][:,1:].float().cuda()
if self.config.data.world_space:
T0 = input_dict['T0'][batch_id].cuda()
source_xyz = apply_transform(source_xyz, T0)
if self.config.shape_context.fast_partition:
source_partition = self.partitioner.compute_partitions_fast(source_xyz)
else:
source_partition = self.partitioner.compute_partitions(source_xyz)
      for partition_id in range(self.partitioner.partitions):
        factor = 1.0
        # optionally up-weight the inner (closer-range) partitions
        if self.config.shape_context.weight_inner and partition_id < int(self.partitioner.partitions/2):
          factor = 2.0
        # keep only negatives that fall into the current spatial partition; the diagonal
        # holds the positive pairs and must never be masked out
        mask_q = (source_partition == partition_id)
        mask_q.fill_diagonal_(True)
        loss += factor * self.compute_loss(q, k, ~mask_q) / (self.partitioner.partitions * self.batch_size)
loss.backward()
result = {"loss": loss}
if self.config.misc.num_gpus > 1:
result = du.scaled_all_reduce_dict(result, self.config.misc.num_gpus)
batch_loss['loss'] += result["loss"].item()
self.optimizer.step()
torch.cuda.empty_cache()
total_timer.toc()
data_meter.update(data_time)
return batch_loss
class PartitionPointNCELossTrainerPointNet(PointNCELossTrainer):
def _train_iter(self, data_loader_iter, timers):
# optimizer and loss
self.optimizer.zero_grad()
batch_loss = {
'loss': 0.0,
}
loss = 0
# timing
data_meter, data_timer, total_timer = timers
data_time = 0
total_timer.tic()
data_timer.tic()
    input_dict = next(data_loader_iter)
data_time += data_timer.toc(average=False)
# network forwarding
points = input_dict['sinput0_C']
feats = input_dict['sinput0_F']
points0 = []
for batch_id in points[:,0].unique():
mask = points[:,0] == batch_id
points0.append(points[mask, 1:])
points0 = torch.stack(points0).cuda()
F0 = self.model(points0)
F0 = F0.transpose(1,2).contiguous()
F0 = F0.view(-1, 32)
points = input_dict['sinput1_C']
feats = input_dict['sinput1_F']
points1 = []
for batch_id in points[:,0].unique():
mask = points[:,0] == batch_id
points1.append(points[mask, 1:])
points1 = torch.stack(points1).cuda()
F1 = self.model(points1)
F1 = F1.transpose(1,2).contiguous()
F1 = F1.view(-1, 32)
# get positive pairs
pos_pairs = input_dict['correspondences'].to(self.cur_device)
q_unique, count = pos_pairs[:, 0].unique(return_counts=True)
uniform = torch.distributions.Uniform(0, 1).sample([len(count)]).to(self.cur_device)
off = torch.floor(uniform*count).long()
cums = torch.cat([torch.tensor([0], device=self.cur_device), torch.cumsum(count, dim=0)[0:-1]], dim=0)
k_sel = pos_pairs[:, 1][off+cums]
# iterate batch
source_batch_ids = input_dict['sinput0_C'][q_unique.long()][:,0].float().cuda()
for batch_id in range(self.batch_size):
# batch mask
mask = (source_batch_ids == batch_id)
q_unique_batch = q_unique[mask]
k_sel_batch = k_sel[mask]
# sampling points in current scene
if self.npos < q_unique_batch.shape[0]:
sampled_inds = np.random.choice(q_unique_batch.shape[0], self.npos, replace=False)
q_unique_batch = q_unique_batch[sampled_inds]
k_sel_batch = k_sel_batch[sampled_inds]
q = F0[q_unique_batch.long()]
k = F1[k_sel_batch.long()]
npos = q.shape[0]
if npos == 0:
logging.info('partitionTrainer: no points in this batch')
continue
source_xyz = input_dict['sinput0_C'][q_unique_batch.long()][:,1:].float().cuda()
if self.config.data.world_space:
T0 = input_dict['T0'][batch_id].cuda()
source_xyz = apply_transform(source_xyz, T0)
if self.config.shape_context.fast_partition:
source_partition = self.partitioner.compute_partitions_fast(source_xyz)
else:
source_partition = self.partitioner.compute_partitions(source_xyz)
for partition_id in range(self.partitioner.partitions):
factor = 1.0
if self.config.shape_context.weight_inner and partition_id < int(self.partitioner.partitions/2):
factor = 2.0
mask_q = (source_partition == partition_id)
mask_q.fill_diagonal_(True)
loss += factor * self.compute_loss(q, k, ~mask_q) / (self.partitioner.partitions * self.batch_size)
loss.backward()
result = {"loss": loss}
if self.config.misc.num_gpus > 1:
result = du.scaled_all_reduce_dict(result, self.config.misc.num_gpus)
batch_loss['loss'] += result["loss"].item()
self.optimizer.step()
torch.cuda.empty_cache()
total_timer.toc()
data_meter.update(data_time)
return batch_loss
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/ddp_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
# color palette for nyu40 labels
def create_color_palette():
return [
(0, 0, 0),
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(178, 76, 76),
(247, 182, 210), # desk
(66, 188, 102),
(219, 219, 141), # curtain
(140, 57, 197),
(202, 185, 52),
(51, 176, 203),
(200, 54, 131),
(92, 193, 61),
(78, 71, 183),
(172, 114, 82),
(255, 127, 14), # refrigerator
(91, 163, 138),
(153, 98, 156),
(140, 153, 101),
(158, 218, 229), # shower curtain
(100, 125, 154),
(178, 127, 135),
(120, 185, 128),
(146, 111, 194),
(44, 160, 44), # toilet
(112, 128, 144), # sink
(96, 207, 209),
(227, 119, 194), # bathtub
(213, 92, 176),
(94, 106, 211),
(82, 84, 163), # otherfurn
(100, 85, 144),
]
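# A minimal usage sketch (illustrative): map per-vertex nyu40 label ids to RGB colors
# and export a colored mesh for visual inspection. Inputs are assumed to be numpy
# arrays; labels outside 0..40 are clamped.
def _example_colorize_labels(vertices, faces, labels, out_file='labels_vis.ply'):
  palette = np.array(create_color_palette(), dtype=np.uint8)  # [41, 3]
  colors = palette[np.clip(labels, 0, len(palette) - 1)]
  write_triangle_mesh(vertices, colors, faces, out_file)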
def write_triangle_mesh(vertices, colors, faces, outputFile):
mesh = trimesh.Trimesh(vertices=vertices, vertex_colors=colors, faces=faces, process=False)
mesh.export(outputFile)
def read_triangle_mesh(filename):
mesh = trimesh.load_mesh(filename, process=False)
if isinstance(mesh, trimesh.PointCloud):
vertices = mesh.vertices
colors = mesh.colors
faces = None
elif isinstance(mesh, trimesh.Trimesh):
vertices = mesh.vertices
colors = mesh.visual.vertex_colors
faces = mesh.faces
  return vertices, colors, faces
 | ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/io3d.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
"""Distributed helpers."""
import pickle
import time
import functools
import logging
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.autograd import Function
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather_differentiable(tensor):
"""
Run differentiable gather function for SparseConv features with variable number of points.
tensor: [num_points, feature_dim]
"""
world_size = get_world_size()
if world_size == 1:
return [tensor]
num_points, f_dim = tensor.size()
local_np = torch.LongTensor([num_points]).to("cuda")
np_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(np_list, local_np)
np_list = [int(np.item()) for np in np_list]
max_np = max(np_list)
tensor_list = []
for _ in np_list:
tensor_list.append(torch.FloatTensor(size=(max_np, f_dim)).to("cuda"))
if local_np != max_np:
padding = torch.zeros(size=(max_np-local_np, f_dim)).to("cuda").float()
tensor = torch.cat((tensor, padding), dim=0)
assert tensor.size() == (max_np, f_dim)
dist.all_gather(tensor_list, tensor)
data_list = []
for gather_np, gather_tensor in zip(np_list, tensor_list):
gather_tensor = gather_tensor[:gather_np]
assert gather_tensor.size() == (gather_np, f_dim)
data_list.append(gather_tensor)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def is_master_proc(num_gpus):
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
When training using a single GPU, there is only one training processes
which is considered the master processes.
"""
return num_gpus == 1 or torch.distributed.get_rank() == 0
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend="nccl",
init_method="tcp://{}:{}".format("localhost", "10001"),
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather_obj(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def scaled_all_reduce_dict_obj(res_dict, num_gpus):
""" Reduce a dictionary of arbitrary objects. """
res_dict_list = all_gather_obj(res_dict)
assert len(res_dict_list) == num_gpus
res_keys = res_dict_list[0].keys()
res_dict_reduced = {}
for k in res_keys:
res_dict_reduced[k] = 1.0 * sum([r[k] for r in res_dict_list]) / num_gpus
return res_dict_reduced
def scaled_all_reduce_dict(res_dict, num_gpus):
""" Reduce a dictionary of tensors. """
reductions = []
for k in res_dict:
reduction = torch.distributed.all_reduce(res_dict[k], async_op=True)
reductions.append(reduction)
for reduction in reductions:
reduction.wait()
for k in res_dict:
res_dict[k] = res_dict[k].clone().mul_(1.0 / num_gpus)
return res_dict
def scaled_all_reduce(tensors, num_gpus, is_scale=True):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of
the process group (equivalent to cfg.NUM_GPUS).
"""
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = torch.distributed.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
if is_scale:
for tensor in tensors:
tensor.mul_(1.0 / num_gpus)
return tensors
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
# gathers = []
tensor_list = []
output_tensor = []
world_size = dist.get_world_size()
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
torch.distributed.all_gather(
# list(tensor_all.unbind(0)),
tensor_all,
tensor,
async_op=False # performance opt
)
tensor_list.append(tensor_all)
# gathers.append(gather)
# Wait for gathers to finish
# for gather in gathers:
# gather.wait()
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
class AllGatherWithGradient(Function):
"""AllGatherWithGradient"""
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, input):
x_gather = all_gather_batch([input])[0]
return x_gather
def backward(self, grad_output):
N = grad_output.size(0)
mini_batchsize = N // self.args.num_gpus
# Does not scale for gradient
grad_output = scaled_all_reduce([grad_output], self.args.num_gpus, is_scale=False)[0]
cur_gpu = get_rank()
grad_output = \
grad_output[cur_gpu * mini_batchsize: (cur_gpu + 1) * mini_batchsize]
return grad_output
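# A minimal usage sketch (assumes the legacy autograd.Function calling convention used
# in this file and an `args` namespace exposing `num_gpus`; the names are illustrative):
#
#   gather = AllGatherWithGradient(args)
#   feats_all = gather(local_feats)   # [num_gpus * B, C] on every rank
#   loss = criterion(feats_all)       # negatives can now come from every GPU
#   loss.backward()                   # backward hands each rank the grads of its own B rows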
class AllGatherVariableSizeWithGradient(Function):
"""
All Gather with Gradient for variable size inputs: [different num_points, ?].
  Returns the gathered tensors concatenated along dim 0.
"""
def __init__(self, config):
super().__init__()
self.config = config
  def forward(self, input):
    x_gather_list = all_gather_differentiable(input)
    input_size_list = all_gather_obj(input.size(0))
    # every rank needs the per-rank offsets so it can slice out its own gradient in backward
    self.start_list = [sum(input_size_list[:t]) for t in range(len(input_size_list) + 1)]
    dist.barrier()
    return torch.cat(x_gather_list, 0)
  def backward(self, grad_output):
    grad_output = scaled_all_reduce([grad_output], self.config.num_gpus, is_scale=False)[0]
    cur_gpu = get_rank()
    return grad_output[self.start_list[cur_gpu]:self.start_list[cur_gpu + 1]]
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/distributed.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
"""Multiprocessing helpers."""
import multiprocessing as mp
import traceback
from lib.error_handler import ErrorHandler
import lib.distributed as du
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
# Initialize the process group
"""Runs a function from a child process."""
try:
# Initialize the process group
du.init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except KeyboardInterrupt:
# Killed by the parent process
pass
except Exception:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
# Destroy the process group
du.destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
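# A minimal usage sketch (illustrative; `train_fn` stands for any picklable function
# that sets up its model and data loaders after the process group is initialized):
def _example_launch(train_fn, config):
  if config.misc.num_gpus > 1:
    multi_proc_run(config.misc.num_gpus, fun=train_fn, fun_args=(config,))
  else:
    train_fn(config)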
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/multiprocessing_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import numpy as np
class ShapeContext(object):
def __init__(self, r1=0.125, r2=2, nbins_xy=2, nbins_zy=2):
# right-hand rule
"""
nbins_xy >= 2
nbins_zy >= 1
"""
self.r1 = r1
self.r2 = r2
self.nbins_xy = nbins_xy
self.nbins_zy = nbins_zy
self.partitions = nbins_xy * nbins_zy * 2
@staticmethod
def pdist(rel_trans):
D2 = torch.sum(rel_trans.pow(2), 2)
return torch.sqrt(D2 + 1e-7)
@staticmethod
def compute_rel_trans(A, B):
return A.unsqueeze(0) - B.unsqueeze(1)
@staticmethod
def hash(A, B, seed):
'''
seed = bins of B
entry < 0 will be ignored
'''
mask = (A >= 0) & (B >= 0)
C = torch.zeros_like(A) - 1
C[mask] = A[mask] * seed + B[mask]
return C
@staticmethod
def compute_angles(rel_trans):
""" compute angles between a set of points """
angles_xy = torch.atan2(rel_trans[:,:,1], rel_trans[:,:,0])
#angles between 0, 2*pi
angles_xy = torch.fmod(angles_xy + 2 * math.pi, 2 * math.pi)
angles_zy = torch.atan2(rel_trans[:,:,1], rel_trans[:,:,2])
#angles between 0, pi
angles_zy = torch.fmod(angles_zy + 2 * math.pi, math.pi)
return angles_xy, angles_zy
def compute_partitions(self, xyz):
rel_trans = ShapeContext.compute_rel_trans(xyz, xyz)
# angles
angles_xy, angles_zy = ShapeContext.compute_angles(rel_trans)
angles_xy_bins = torch.floor(angles_xy / (2 * math.pi / self.nbins_xy))
angles_zy_bins = torch.floor(angles_zy / (math.pi / self.nbins_zy))
angles_bins = ShapeContext.hash(angles_xy_bins, angles_zy_bins, self.nbins_zy)
# distances
distance_matrix = ShapeContext.pdist(rel_trans)
dist_bins = torch.zeros_like(angles_bins) - 1
# partitions
mask = (distance_matrix >= self.r1) & (distance_matrix < self.r2)
dist_bins[mask] = 0
mask = distance_matrix >= self.r2
dist_bins[mask] = 1
bins = ShapeContext.hash(dist_bins, angles_bins, self.nbins_xy * self.nbins_zy)
return bins
def compute_partitions_fast(self, xyz):
'''
fast partitions: axis-aligned partitions
'''
partition_matrix = torch.zeros((xyz.shape[0], xyz.shape[0]))
    partition_matrix = partition_matrix.cuda() - 1e9
rel_trans = ShapeContext.compute_rel_trans(xyz, xyz)
maskUp = rel_trans[:,:,2] > 0.0
maskDown = rel_trans[:,:,2] < 0.0
distance_matrix = ShapeContext.pdist(rel_trans)
mask = (distance_matrix[:,:] > self.r1) & (distance_matrix[:,:] <= self.r2)
partition_matrix[mask & maskUp] = 0
partition_matrix[mask & maskDown] = 1
mask = distance_matrix[:,:] > self.r2
partition_matrix[mask & maskUp] = 2
partition_matrix[mask & maskDown] = 3
self.partitions = 4
return partition_matrix
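# A minimal usage sketch (illustrative sizes and values): bins[i, j] is the partition
# that point j falls into relative to anchor point i, so comparing against a partition
# id yields a boolean mask that can restrict which points act as negatives for i.
def _example_shape_context():
  sc = ShapeContext(r1=0.125, r2=2.0, nbins_xy=2, nbins_zy=2)
  xyz = torch.rand(1024, 3)
  bins = sc.compute_partitions(xyz)   # [1024, 1024]; entries < 0 mean "ignore"
  mask = (bins == 0)                  # points in partition 0 relative to each anchor
  return mask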
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/shape_context.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
"""Multiprocessing error handler."""
import os
import signal
import threading
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, sig_num, stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/error_handler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
class NCESoftmaxLoss(nn.Module):
def __init__(self):
super(NCESoftmaxLoss, self).__init__()
self.criterion = nn.CrossEntropyLoss()
def forward(self, x, label):
bsz = x.shape[0]
x = x.squeeze()
loss = self.criterion(x, label)
return loss
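# A minimal usage sketch (illustrative shapes and temperature): the positive logit for
# row i sits at column i, so the target label for row i is simply i.
def _example_nce_softmax():
  q = torch.randn(128, 32)
  k = torch.randn(128, 32)
  logits = torch.mm(q, k.t()) / 0.4   # [128, 128] similarity matrix
  labels = torch.arange(128)
  return NCESoftmaxLoss()(logits, labels)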
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.utils.data.sampler import Sampler
import torch.distributed as dist
import math
class InfSampler(Sampler):
def __init__(self, data_source, shuffle=False):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
    perm = len(self.data_source)
    if self.shuffle:
      perm = torch.randperm(perm)
    else:
      # identity permutation when shuffling is disabled, so .tolist() below still works
      perm = torch.arange(perm)
    self._perm = perm.tolist()
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
next = __next__ # Python 2 compatibility
class DistributedInfSampler(InfSampler):
def __init__(self, data_source, num_replicas=None, rank=None, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.data_source = data_source
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.it = 0
self.num_samples = int(math.ceil(len(self.data_source) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.reset_permutation()
def __next__(self):
it = self.it * self.num_replicas + self.rank
value = self._perm[it % len(self._perm)]
self.it = self.it + 1
if (self.it * self.num_replicas) >= len(self._perm):
self.reset_permutation()
self.it = 0
return value
def __len__(self):
return self.num_samples
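# A minimal usage sketch (illustrative): because InfSampler never raises StopIteration,
# a DataLoader built on it can be driven by an iteration budget instead of epochs.
def _example_infinite_loader(dataset, batch_size=8, num_batches=4):
  from torch.utils.data import DataLoader
  loader = DataLoader(dataset, batch_size=batch_size, sampler=InfSampler(dataset, shuffle=True))
  it = iter(loader)
  return [next(it) for _ in range(num_batches)]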
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/data_sampler.py |
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_label.py --pred_path [path to predictions] --gt_path [path to ground truth] --output_file [output file]
# python imports
import math
import logging
import os, sys, argparse
import inspect
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from itertools import izip
except ImportError:
izip = zip
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from .scannet_benchmark_utils import util_3d, util
class Evaluator:
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS):
#CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
# 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
# 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1
self.gt = {}
self.pred = {}
max_id = self.UNKNOWN_ID
self.confusion = np.zeros((max_id+1, max_id+1), dtype=np.ulonglong)
def update_confusion(self, pred_ids, gt_ids, sceneId=None):
# sanity checks
if not pred_ids.shape == gt_ids.shape:
            util.print_error('%s: number of predicted values does not match number of vertices' % sceneId, user_fault=True)
n = self.confusion.shape[0]
k = (gt_ids >= 0) & (gt_ids < n)
temporal = np.bincount(n * gt_ids[k].astype(int) + pred_ids[k], minlength=n**2).reshape(n, n)
for valid_class_row in self.VALID_CLASS_IDS:
for valid_class_col in self.VALID_CLASS_IDS:
self.confusion[valid_class_row][valid_class_col] += temporal[valid_class_row][valid_class_col]
@staticmethod
def write_to_benchmark(base='benchmark_segmentation', sceneId=None, pred_ids=None):
os.makedirs(base, exist_ok=True)
util_3d.export_ids('{}.txt'.format(os.path.join(base, sceneId)), pred_ids)
def get_iou(self, label_id, confusion):
if not label_id in self.VALID_CLASS_IDS:
return float('nan')
# #true positives
tp = np.longlong(confusion[label_id, label_id])
# #false negatives
fn = np.longlong(confusion[label_id, :].sum()) - tp
# #false positives
not_ignored = [l for l in self.VALID_CLASS_IDS if not l == label_id]
fp = np.longlong(confusion[not_ignored, label_id].sum())
denom = (tp + fp + fn)
if denom == 0:
return float('nan')
return (float(tp) / denom, tp, denom)
def write_result_file(self, confusion, ious, filename):
with open(filename, 'w') as f:
f.write('iou scores\n')
for i in range(len(self.VALID_CLASS_IDS)):
label_id = self.VALID_CLASS_IDS[i]
label_name = self.CLASS_LABELS[i]
iou = ious[label_name][0]
f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label_id, iou))
f.write("{0:<14s}: {1:>5.3f}".format('mean', np.array([ious[k][0] for k in ious]).mean()))
f.write('\nconfusion matrix\n')
f.write('\t\t\t')
for i in range(len(self.VALID_CLASS_IDS)):
#f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))
f.write('{0:<8d}'.format(self.VALID_CLASS_IDS[i]))
f.write('\n')
for r in range(len(self.VALID_CLASS_IDS)):
f.write('{0:<14s}({1:<2d})'.format(self.CLASS_LABELS[r], self.VALID_CLASS_IDS[r]))
for c in range(len(self.VALID_CLASS_IDS)):
f.write('\t{0:>5.3f}'.format(confusion[self.VALID_CLASS_IDS[r],self.VALID_CLASS_IDS[c]]))
f.write('\n')
print('wrote results to', filename)
def evaluate_confusion(self, output_file=None):
class_ious = {}
counter = 0
summation = 0
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
label_id = self.VALID_CLASS_IDS[i]
class_ious[label_name] = self.get_iou(label_id, self.confusion)
# print
logging.info('classes IoU')
logging.info('----------------------------')
for i in range(len(self.VALID_CLASS_IDS)):
label_name = self.CLASS_LABELS[i]
try:
logging.info('{0:<14s}: {1:>5.3f} ({2:>6d}/{3:<6d})'.format(label_name, class_ious[label_name][0], class_ious[label_name][1], class_ious[label_name][2]))
summation += class_ious[label_name][0]
counter += 1
except:
logging.info('{0:<14s}: nan ( nan/nan )'.format(label_name))
        if counter != 0:
            logging.info("{0:<14s}: {1:>5.3f}".format('mean', summation / counter))
        if output_file:
            self.write_result_file(self.confusion, class_ious, output_file)
        return summation / counter if counter != 0 else float('nan')
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to gt files')
parser.add_argument('--output_file', type=str, default='./semantic_label_evaluation.txt')
opt = parser.parse_args()
return opt
def main():
opt = config()
#------------------------- ScanNet --------------------------
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if pred_file == 'semantic_label_evaluation.txt':
continue
gt_file = os.path.join(opt.gt_path, pred_file)
if not os.path.isfile(gt_file):
util.print_error('Result file {} does not match any gt file'.format(pred_file), user_fault=True)
gt_ids = util_3d.load_ids(gt_file)
pred_file = os.path.join(opt.pred_path, pred_file)
pred_ids = util_3d.load_ids(pred_file)
evaluator.update_confusion(pred_ids, gt_ids, pred_file.split('.')[0])
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
# evaluate
evaluator.evaluate_confusion(opt.output_file)
if __name__ == '__main__':
main()
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/evaluation/evaluate_semantic_label.py |
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file looks like:
# [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence]
# [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence]
# [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence]
# ...
#
# NOTE: The prediction files must live in the root of the given prediction path.
# Predicted mask .txt files must live in a subfolder.
# Additionally, filenames must not contain spaces.
# The relative paths to predicted masks must contain one integer per line,
# where each line corresponds to vertices in the *_vh_clean_2.ply (in that order).
# Non-zero integers indicate part of the predicted instance.
# The label ids specify the class of the corresponding mask.
# Confidence is a float confidence score of the mask.
#
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage: evaluate_semantic_instance.py --pred_path [path to predictions] --gt_path [path to ground truth] --output_file [output file]
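# Hypothetical example (scene names and values are made up) of one prediction file
# following the format above, e.g. scene0707_00.txt:
#   predicted_masks/scene0707_00_000.txt 33 0.9731
#   predicted_masks/scene0707_00_001.txt 5 0.8812
# where each referenced mask file stores one 0/1 value per vertex of the scan.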
# python imports
import logging
import math
import os, sys, argparse
import inspect
from copy import deepcopy
import argparse
import numpy as np
#currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
#parentdir = os.path.dirname(currentdir)
#sys.path.insert(0,parentdir)
from lib.scannet_benchmark_utils import util_3d
from lib.scannet_benchmark_utils import util
def setup_logging():
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split('.')[0] + ' %(asctime)s %(message)s',
datefmt='%m/%d %H:%M:%S',
handlers=[ch])
class Evaluator:
# ---------- Evaluation params ---------- #
# overlaps for evaluation
overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25)
# minimum region size for evaluation [verts]
min_region_sizes = np.array( [ 100 ] )
# distance thresholds [m]
distance_threshes = np.array( [ float('inf') ] )
# distance confidences
distance_confs = np.array( [ -float('inf') ] )
def __init__(self, CLASS_LABELS, VALID_CLASS_IDS, benchmark=False):
# ---------- Label info ---------- #
#CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
# 'window', 'bookshelf', 'picture', 'counter',
# 'desk', 'curtain', 'refrigerator', 'shower curtain',
# 'toilet', 'sink', 'bathtub', 'otherfurniture']
#VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
self.CLASS_LABELS = CLASS_LABELS
self.VALID_CLASS_IDS = VALID_CLASS_IDS
self.ID_TO_LABEL = {}
self.LABEL_TO_ID = {}
for i in range(len(VALID_CLASS_IDS)):
self.LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
self.ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
self.pred_instances = {}
self.gt_instances = {}
self.benchmark = benchmark
def evaluate_matches(self, matches):
# results: class x overlap
        ap = np.zeros( (len(self.distance_threshes) , len(self.CLASS_LABELS) , len(self.overlaps)) , float )
for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(self.min_region_sizes, self.distance_threshes, self.distance_confs)):
for oi, overlap_th in enumerate(self.overlaps):
pred_visited = {}
for m in matches:
for p in matches[m]['pred']:
for label_name in self.CLASS_LABELS:
for p in matches[m]['pred'][label_name]:
if 'filename' in p:
pred_visited[p['filename']] = False
for li, label_name in enumerate(self.CLASS_LABELS):
y_true = np.empty(0)
y_score = np.empty(0)
hard_false_negatives = 0
has_gt = False
has_pred = False
for m in matches:
pred_instances = matches[m]['pred'][label_name]
gt_instances = matches[m]['gt'][label_name]
# filter groups in ground truth
gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ]
if gt_instances:
has_gt = True
if pred_instances:
has_pred = True
cur_true = np.ones ( len(gt_instances) )
cur_score = np.ones ( len(gt_instances) ) * (-float("inf"))
                        cur_match = np.zeros( len(gt_instances) , dtype=bool )
# collect matches
for (gti,gt) in enumerate(gt_instances):
found_match = False
num_pred = len(gt['matched_pred'])
for pred in gt['matched_pred']:
# greedy assignments
if pred_visited[pred['filename']]:
continue
overlap = float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection'])
if overlap > overlap_th:
confidence = pred['confidence']
# if already have a prediction for this gt,
# the prediction with the lower score is automatically a false positive
if cur_match[gti]:
max_score = max( cur_score[gti] , confidence )
min_score = min( cur_score[gti] , confidence )
cur_score[gti] = max_score
# append false positive
cur_true = np.append(cur_true,0)
cur_score = np.append(cur_score,min_score)
cur_match = np.append(cur_match,True)
# otherwise set score
else:
found_match = True
cur_match[gti] = True
cur_score[gti] = confidence
pred_visited[pred['filename']] = True
if not found_match:
hard_false_negatives += 1
# remove non-matched ground truth instances
cur_true = cur_true [ cur_match==True ]
cur_score = cur_score[ cur_match==True ]
# collect non-matched predictions as false positive
for pred in pred_instances:
found_gt = False
for gt in pred['matched_gt']:
overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection'])
if overlap > overlap_th:
found_gt = True
break
if not found_gt:
num_ignore = pred['void_intersection']
for gt in pred['matched_gt']:
# group?
if gt['instance_id'] < 1000:
num_ignore += gt['intersection']
# small ground truth instances
if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf']<distance_conf:
num_ignore += gt['intersection']
proportion_ignore = float(num_ignore)/pred['vert_count']
# if not ignored append false positive
if proportion_ignore <= overlap_th:
cur_true = np.append(cur_true,0)
confidence = pred["confidence"]
cur_score = np.append(cur_score,confidence)
# append to overall results
y_true = np.append(y_true,cur_true)
y_score = np.append(y_score,cur_score)
# compute average precision
if has_gt and has_pred:
# compute precision recall curve first
# sorting and cumsum
score_arg_sort = np.argsort(y_score)
y_score_sorted = y_score[score_arg_sort]
y_true_sorted = y_true[score_arg_sort]
y_true_sorted_cumsum = np.cumsum(y_true_sorted)
# unique thresholds
(thresholds,unique_indices) = np.unique( y_score_sorted , return_index=True )
num_prec_recall = len(unique_indices) + 1
# prepare precision recall
num_examples = len(y_score_sorted)
try:
num_true_examples = y_true_sorted_cumsum[-1]
except:
num_true_examples = 0
precision = np.zeros(num_prec_recall)
recall = np.zeros(num_prec_recall)
# deal with the first point
y_true_sorted_cumsum = np.append( y_true_sorted_cumsum , 0 )
# deal with remaining
for idx_res,idx_scores in enumerate(unique_indices):
cumsum = y_true_sorted_cumsum[idx_scores-1]
tp = num_true_examples - cumsum
fp = num_examples - idx_scores - tp
fn = cumsum + hard_false_negatives
p = float(tp)/(tp+fp)
r = float(tp)/(tp+fn)
precision[idx_res] = p
recall [idx_res] = r
# first point in curve is artificial
precision[-1] = 1.
recall [-1] = 0.
# compute average of precision-recall curve
recall_for_conv = np.copy(recall)
recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
recall_for_conv = np.append(recall_for_conv, 0.)
stepWidths = np.convolve(recall_for_conv,[-0.5,0,0.5],'valid')
# integrate is now simply a dot product
ap_current = np.dot(precision, stepWidths)
elif has_gt:
ap_current = 0.0
else:
ap_current = float('nan')
ap[di,li,oi] = ap_current
return ap
def compute_averages(self, aps):
d_inf = 0
o50 = np.where(np.isclose(self.overlaps,0.5))
o25 = np.where(np.isclose(self.overlaps,0.25))
oAllBut25 = np.where(np.logical_not(np.isclose(self.overlaps,0.25)))
avg_dict = {}
#avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,: ])
avg_dict['all_ap'] = np.nanmean(aps[ d_inf,:,oAllBut25])
avg_dict['all_ap_50%'] = np.nanmean(aps[ d_inf,:,o50])
avg_dict['all_ap_25%'] = np.nanmean(aps[ d_inf,:,o25])
avg_dict["classes"] = {}
for (li,label_name) in enumerate(self.CLASS_LABELS):
avg_dict["classes"][label_name] = {}
#avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li, :])
avg_dict["classes"][label_name]["ap"] = np.average(aps[ d_inf,li,oAllBut25])
avg_dict["classes"][label_name]["ap50%"] = np.average(aps[ d_inf,li,o50])
avg_dict["classes"][label_name]["ap25%"] = np.average(aps[ d_inf,li,o25])
return avg_dict
def assign_instances_for_scan(self, scene_id):
# get gt instances
gt_ids = self.gt_instances[scene_id]
gt_instances = util_3d.get_instances(gt_ids, self.VALID_CLASS_IDS, self.CLASS_LABELS, self.ID_TO_LABEL)
# associate
gt2pred = deepcopy(gt_instances)
for label in gt2pred:
for gt in gt2pred[label]:
gt['matched_pred'] = []
pred2gt = {}
for label in self.CLASS_LABELS:
pred2gt[label] = []
num_pred_instances = 0
# mask of void labels in the groundtruth
bool_void = np.logical_not(np.in1d(gt_ids//1000, self.VALID_CLASS_IDS))
# go thru all prediction masks
for instance_id in self.pred_instances[scene_id]:
label_id = int(self.pred_instances[scene_id][instance_id]['label_id'])
conf = self.pred_instances[scene_id][instance_id]['conf']
if not label_id in self.ID_TO_LABEL:
continue
label_name = self.ID_TO_LABEL[label_id]
# read the mask
pred_mask = self.pred_instances[scene_id][instance_id]['pred_mask']
# convert to binary
num = np.count_nonzero(pred_mask)
if num < self.min_region_sizes[0]:
continue # skip if empty
pred_instance = {}
pred_instance['filename'] = str(scene_id) + '/' + str(instance_id)
pred_instance['pred_id'] = num_pred_instances
pred_instance['label_id'] = label_id
pred_instance['vert_count'] = num
pred_instance['confidence'] = conf
pred_instance['void_intersection'] = np.count_nonzero(np.logical_and(bool_void, pred_mask))
# matched gt instances
matched_gt = []
# go thru all gt instances with matching label
for (gt_num, gt_inst) in enumerate(gt2pred[label_name]):
intersection = np.count_nonzero(np.logical_and(gt_ids == gt_inst['instance_id'], pred_mask))
if intersection > 0:
gt_copy = gt_inst.copy()
pred_copy = pred_instance.copy()
gt_copy['intersection'] = intersection
pred_copy['intersection'] = intersection
matched_gt.append(gt_copy)
gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy)
pred_instance['matched_gt'] = matched_gt
num_pred_instances += 1
pred2gt[label_name].append(pred_instance)
return gt2pred, pred2gt
def print_results(self, avgs):
sep = ""
col1 = ":"
lineLen = 64
logging.info("")
logging.info("#"*lineLen)
line = ""
line += "{:<15}".format("what" ) + sep + col1
line += "{:>15}".format("AP" ) + sep
line += "{:>15}".format("AP_50%" ) + sep
line += "{:>15}".format("AP_25%" ) + sep
logging.info(line)
logging.info("#"*lineLen)
for (li,label_name) in enumerate(self.CLASS_LABELS):
ap_avg = avgs["classes"][label_name]["ap"]
ap_50o = avgs["classes"][label_name]["ap50%"]
ap_25o = avgs["classes"][label_name]["ap25%"]
line = "{:<15}".format(label_name) + sep + col1
line += sep + "{:>15.3f}".format(ap_avg ) + sep
line += sep + "{:>15.3f}".format(ap_50o ) + sep
line += sep + "{:>15.3f}".format(ap_25o ) + sep
logging.info(line)
all_ap_avg = avgs["all_ap"]
all_ap_50o = avgs["all_ap_50%"]
all_ap_25o = avgs["all_ap_25%"]
logging.info("-"*lineLen)
line = "{:<15}".format("average") + sep + col1
line += "{:>15.3f}".format(all_ap_avg) + sep
line += "{:>15.3f}".format(all_ap_50o) + sep
line += "{:>15.3f}".format(all_ap_25o) + sep
logging.info(line)
logging.info("")
@staticmethod
def write_to_benchmark(output_path='benchmark_instance', scene_id=None, pred_inst={}):
os.makedirs(output_path, exist_ok=True)
os.makedirs(os.path.join(output_path, 'predicted_masks'), exist_ok=True)
f = open(os.path.join(output_path, scene_id + '.txt'), 'w')
for instance_id in pred_inst:
# for pred instance id starts from 0; in gt valid instance id starts from 1
score = pred_inst[instance_id]['conf']
label = pred_inst[instance_id]['label_id']
mask = pred_inst[instance_id]['pred_mask']
f.write('predicted_masks/{}_{:03d}.txt {} {:.4f}'.format(scene_id, instance_id, label, score))
if instance_id < len(pred_inst) - 1:
f.write('\n')
util_3d.export_ids(os.path.join(output_path, 'predicted_masks', scene_id + '_%03d.txt' % (instance_id)), mask)
f.close()
def add_prediction(self, instance_info, id):
self.pred_instances[id] = instance_info
def add_gt(self, instance_info, id):
self.gt_instances[id] = instance_info
def evaluate(self):
print('evaluating', len(self.pred_instances), 'scans...')
matches = {}
for i, scene_id in enumerate(self.pred_instances):
gt2pred, pred2gt = self.assign_instances_for_scan(scene_id)
matches[scene_id] = {}
matches[scene_id]['gt'] = gt2pred
matches[scene_id]['pred'] = pred2gt
sys.stdout.write("\rscans processed: {}".format(i+1))
sys.stdout.flush()
print('')
ap_scores = self.evaluate_matches(matches)
avgs = self.compute_averages(ap_scores)
# print
self.print_results(avgs)
return avgs['all_ap'], avgs['all_ap_50%'], avgs['all_ap_25%']
def write_result_file(avgs, filename):
_SPLITTER = ','
with open(filename, 'w') as f:
f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n')
for i in range(len(VALID_CLASS_IDS)):
class_name = CLASS_LABELS[i]
class_id = VALID_CLASS_IDS[i]
ap = avgs["classes"][class_name]["ap"]
ap50 = avgs["classes"][class_name]["ap50%"]
ap25 = avgs["classes"][class_name]["ap25%"]
f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n')
def config():
parser = argparse.ArgumentParser()
parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files')
parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files')
parser.add_argument('--output_file', default='semantic_instance_evaluation.txt', help='output file [default: semantic_instance_evaluation.txt]')
opt = parser.parse_args()
return opt
if __name__ == '__main__':
opt = config()
setup_logging()
#-----------------scannet----------------------
CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
evaluator = Evaluator(CLASS_LABELS=CLASS_LABELS, VALID_CLASS_IDS=VALID_CLASS_IDS)
print('reading', len(os.listdir(opt.pred_path))-1, 'scans...')
for i, pred_file in enumerate(os.listdir(opt.pred_path)):
if os.path.isdir(os.path.join(opt.pred_path, pred_file)):
continue
scene_id = pred_file[:12]
sys.stdout.write("\rscans read: {}".format(i+1))
sys.stdout.flush()
gt_file = os.path.join(opt.gt_path, pred_file)
gt_ids = util_3d.load_ids(gt_file)
evaluator.add_gt(gt_ids, scene_id)
instances = util_3d.read_instance_prediction_file(os.path.join(opt.pred_path,pred_file), opt.pred_path)
for pred_mask_file in instances:
# read the mask
pred_mask = util_3d.load_ids(pred_mask_file)
instances[pred_mask_file]['pred_mask'] = pred_mask
evaluator.add_prediction(instances, scene_id)
print('')
_, _, _ = evaluator.evaluate()
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/evaluation/evaluate_semantic_instance.py |
import os, sys
import csv
try:
import numpy as np
except ImportError:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except ImportError:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def print_error(message, user_fault=False):
sys.stderr.write('ERROR: ' + str(message) + '\n')
if user_fault:
sys.exit(2)
sys.exit(-1)
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
if represents_int([key for key in mapping.keys()][0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
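# Illustrative usage (the file name is an assumption; any TSV with the two columns works):
#   label_map = read_label_mapping('scannetv2-labels.combined.tsv')
#   label_map['chair']  # -> 5 (nyu40id)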
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/evaluation/scannet_benchmark_utils/util.py |
import os, sys
import json
try:
import numpy as np
except ImportError:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except ImportError:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
from . import util
# matrix: 4x4 np array
# points Nx3 np array
def transform_points(matrix, points):
assert len(points.shape) == 2 and points.shape[1] == 3
num_points = points.shape[0]
p = np.concatenate([points, np.ones((num_points, 1))], axis=1)
p = np.matmul(matrix, np.transpose(p))
p = np.transpose(p)
p[:,:3] /= p[:,3,None]
return p[:,:3]
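# Minimal usage sketch: apply a rigid 4x4 transform to an Nx3 point array.
#   T = np.eye(4); T[:3, 3] = [0.1, 0.0, 0.0]   # translate 0.1 along x
#   transform_points(T, np.array([[0., 0., 0.], [1., 2., 3.]]))
#   # -> [[0.1, 0., 0.], [1.1, 2., 3.]]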
def export_ids(filename, ids):
with open(filename, 'w') as f:
for id in ids:
f.write('%d\n' % id)
def load_ids(filename):
ids = open(filename).read().splitlines()
ids = np.array(ids, dtype=np.int64)
return ids
def read_mesh_vertices(filename):
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
return vertices
# export 3d instance labels for instance evaluation
def export_instance_ids_for_eval(filename, label_ids, instance_ids):
assert label_ids.shape[0] == instance_ids.shape[0]
output_mask_path_relative = 'predicted_masks'
name = os.path.splitext(os.path.basename(filename))[0]
output_mask_path = os.path.join(os.path.dirname(filename), output_mask_path_relative)
if not os.path.isdir(output_mask_path):
os.mkdir(output_mask_path)
insts = np.unique(instance_ids)
zero_mask = np.zeros(shape=(instance_ids.shape[0]), dtype=np.int32)
with open(filename, 'w') as f:
for idx, inst_id in enumerate(insts):
if inst_id == 0: # 0 -> no instance for this vertex
continue
loc = np.where(instance_ids == inst_id)
label_id = label_ids[loc[0][0]]
# write mask indexing
            output_mask_file_relative = os.path.join(output_mask_path_relative, name + '_' + str(idx) + '.txt')
            f.write('%s %d %f\n' % (output_mask_file_relative, label_id, 1.0))
# write mask
mask = np.copy(zero_mask)
mask[loc[0]] = 1
output_mask_file = os.path.join(output_mask_path, name + '_' + str(idx) + '.txt')
export_ids(output_mask_file, mask)
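# The exported file has one line per instance in the form read back by
# read_instance_prediction_file below, e.g. (values illustrative):
#   predicted_masks/scene0000_00_1.txt 5 1.000000
# where each referenced mask file stores one 0/1 value per vertex.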
# ------------ Instance Utils ------------ #
class Instance(object):
instance_id = 0
label_id = 0
vert_count = 0
med_dist = -1
dist_conf = 0.0
def __init__(self, mesh_vert_instances, instance_id):
if (instance_id == -1):
return
self.instance_id = int(instance_id)
self.label_id = int(self.get_label_id(instance_id))
self.vert_count = int(self.get_instance_verts(mesh_vert_instances, instance_id))
def get_label_id(self, instance_id):
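        # instance ids follow the ScanNet-style encoding label_id * 1000 + instance
        # index, so integer division by 1000 recovers the semantic label id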
return int(instance_id // 1000)
def get_instance_verts(self, mesh_vert_instances, instance_id):
return (mesh_vert_instances == instance_id).sum()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
def to_dict(self):
        instance_dict = {}
        instance_dict["instance_id"] = self.instance_id
        instance_dict["label_id"] = self.label_id
        instance_dict["vert_count"] = self.vert_count
        instance_dict["med_dist"] = self.med_dist
        instance_dict["dist_conf"] = self.dist_conf
        return instance_dict
def from_json(self, data):
self.instance_id = int(data["instance_id"])
self.label_id = int(data["label_id"])
self.vert_count = int(data["vert_count"])
if ("med_dist" in data):
self.med_dist = float(data["med_dist"])
self.dist_conf = float(data["dist_conf"])
def __str__(self):
return "("+str(self.instance_id)+")"
def read_instance_prediction_file(filename, pred_path):
lines = open(filename).read().splitlines()
instance_info = {}
abs_pred_path = os.path.abspath(pred_path)
for line in lines:
parts = line.split(' ')
if len(parts) != 3:
util.print_error('invalid instance prediction file. Expected (per line): [rel path prediction] [label id prediction] [confidence prediction]')
if os.path.isabs(parts[0]):
util.print_error('invalid instance prediction file. First entry in line must be a relative path')
mask_file = os.path.join(os.path.dirname(filename), parts[0])
mask_file = os.path.abspath(mask_file)
# check that mask_file lives inside prediction path
if os.path.commonprefix([mask_file, abs_pred_path]) != abs_pred_path:
util.print_error('predicted mask {} in prediction text file {} points outside of prediction path.'.format(mask_file,filename))
info = {}
info["label_id"] = int(float(parts[1]))
info["conf"] = float(parts[2])
instance_info[mask_file] = info
return instance_info
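# A concrete prediction line (illustrative values), matching the format checked above:
#   predicted_masks/scene0000_00_001.txt 5 0.9231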
def get_instances(ids, class_ids, class_labels, id2label):
instances = {}
for label in class_labels:
instances[label] = []
instance_ids = np.unique(ids)
for id in instance_ids:
if id == 0:
continue
inst = Instance(ids, id)
if inst.label_id in class_ids:
instances[id2label[inst.label_id]].append(inst.to_dict())
return instances
| ContrastiveSceneContexts-main | pretrain/contrastive_scene_contexts/lib/evaluation/scannet_benchmark_utils/util_3d.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import time
import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from model.builder import build_estimator
def parse_args():
parser = argparse.ArgumentParser(description='MMSeg benchmark a model')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
torch.backends.cudnn.benchmark = False
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_estimator(cfg.model, test_cfg=cfg.get('test_cfg'))
model = MMDataParallel(model, device_ids=[0])
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
total_iters = 200
metas = [[dict(min_disp=1, max_disp=100, ori_shape=(512, 640), img_shape=(512, 640))]]
img = [torch.rand([1, 1, 3, 512, 640]).cuda()]
data = dict(img=img, img_metas=metas, r_img=img, gt_disp=None)
    # benchmark with 200 images and take the average
for i in range(total_iters):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, rescale=True, evaluate=False, **data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % args.log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print(f'Done image [{i + 1:<3}/ {total_iters}], '
f'fps: {fps:.2f} img / s')
if (i + 1) == total_iters:
fps = (i + 1 - num_warmup) / pure_inf_time
print(f'Overall fps: {fps:.2f} img / s')
break
if __name__ == '__main__':
main()
| CODD-main | benchmark_speed.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv.cnn.utils import revert_sync_batchnorm
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed
from mmseg.datasets import build_dataset
from mmseg.utils import collect_env, get_root_logger
import datasets # NOQA
from apis import train_estimator
from model import build_estimator
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=42, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--detect_anomaly', action='store_true')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
torch.autograd.set_detect_anomaly(args.detect_anomaly)
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# gpu_ids is used to calculate iter when resuming checkpoint
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, deterministic: '
f'{args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_estimator(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
# SyncBN is not support for DP
if not distributed:
warnings.warn(
'SyncBN is only supported with DDP. To be compatible with DP, '
'we convert SyncBN to BN. Please use dist_train.sh which can '
'avoid this error.')
model = revert_sync_batchnorm(model)
logger.info(model)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmseg version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
# passing checkpoint meta for saving best checkpoint
meta.update(cfg.checkpoint_config.meta)
train_estimator(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| CODD-main | train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import argparse
import os
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, wrap_fp16_model)
from mmcv.utils import DictAction
from mmseg.datasets import build_dataloader, build_dataset
import datasets # NOQA
from apis import multi_gpu_inference, single_gpu_inference
from model import build_estimator
def parse_args():
parser = argparse.ArgumentParser(description="mmseg test (and eval) a model")
parser.add_argument("config", help="test config file path")
parser.add_argument("checkpoint", help="checkpoint file")
parser.add_argument(
"--show-dir", default='./work_dirs/output',
help="directory where logs and visualization will be saved",
)
parser.add_argument('--eval', action='store_true', help='eval results')
parser.add_argument('--show', action='store_true', help='draw comparison figures')
parser.add_argument("--img-dir", help="directory to input images")
parser.add_argument("--r-img-dir", help="directory to input images")
parser.add_argument(
"--img-suffix", default=".png", help="suffix of image file, e.g., '.png'")
parser.add_argument(
"--num-frames", type=int, help="number of frames to run inference"
)
parser.add_argument(
"--num-workers", type=int, help="number of workers to run inference", default=1
)
parser.add_argument("--options", nargs="+", action=DictAction, help="custom options")
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get("cudnn_benchmark", False):
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
if args.num_frames is not None:
cfg.data.test.num_samples = args.num_frames
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
if not distributed:
cfg.data.workers_per_gpu = 0
# build the dataloader
if args.img_dir is not None:
cfg.data.test.data_root = None
cfg.data.test.img_dir = args.img_dir
cfg.data.test.r_img_dir = args.r_img_dir
cfg.data.test.img_suffix = args.img_suffix
cfg.data.test.r_img_suffix = args.img_suffix
rank, world_size = get_dist_info()
cfg.data.test.rank = rank
cfg.data.test.world_size = world_size
cfg.data.test.inference_mode = True
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=args.num_workers,
dist=distributed,
shuffle=False,
persistent_workers=distributed
)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_estimator(cfg.model, test_cfg=cfg.get("test_cfg"))
fp16_cfg = cfg.get("fp16", None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, args.checkpoint, map_location="cpu")
if not distributed:
        device_ids = [0] if args.gpus is not None and args.gpus > 1 else None
model = MMDataParallel(model, device_ids=device_ids)
single_gpu_inference(model, data_loader, args.show_dir, show=args.show, evaluate=args.eval)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
)
multi_gpu_inference(model, data_loader, args.show_dir, show=args.show, evaluate=args.eval)
if __name__ == '__main__':
main()
| CODD-main | inference.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .inference import single_gpu_inference, multi_gpu_inference
from .train import train_estimator
| CODD-main | apis/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import warnings
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_root_logger
def train_estimator(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Launch estimator training."""
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
drop_last=True,
persistent_workers=True if cfg.data.workers_per_gpu > 0 else False) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if cfg.get('runner') is None:
cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# register hooks
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
# an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
persistent_workers=True if cfg.data.workers_per_gpu > 0 else False
)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook if distributed else EvalHook
# In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
# priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
runner.register_hook(
eval_hook(val_dataloader, **eval_cfg), priority='LOW')
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
| CODD-main | apis/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import functools
import os.path as osp
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmcv.utils import print_log, mkdir_or_exist
from mmseg.utils import get_root_logger
from utils import RunningStatsWithBuffer
def single_gpu_inference(
model,
data_loader,
out_dir=None,
show=False,
evaluate=False,
**kwargs
):
"""Inference with single GPU.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
        out_dir (str, optional): If specified, visualization results will be
            written into this directory.
        show (bool): whether to draw comparison figures.
        evaluate (bool): whether to calculate metrics.
Returns:
None.
"""
model.eval()
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
mkdir_or_exist(out_dir)
rs = RunningStatsWithBuffer(osp.join(out_dir, "stats.csv")) if evaluate else None
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, evaluate=evaluate, **data)
if out_dir:
img_metas = data["img_metas"][0].data[0]
for img_meta in img_metas:
out_file = osp.join(out_dir, img_meta["ori_filename"])
model.module.show_result(
img_meta["filename"],
result,
show=show,
out_file=out_file,
inp=data,
dataset={
k: v
for k, v in vars(dataset).items()
if isinstance(v, (int, float, tuple))
},
running_stats=rs,
)
batch_size = len(result)
for _ in range(batch_size):
prog_bar.update()
if evaluate:
print_log(
f"\n{rs.n} samples, mean {rs.mean}, std: {rs.std}", logger=get_root_logger()
)
rs.dump()
def multi_gpu_inference(
model,
data_loader,
out_dir=None,
show=False,
evaluate=False,
**kwargs
):
"""Test model with multiple gpus.
    This method runs inference with multiple GPUs. When ``evaluate`` is set,
    per-rank running statistics are gathered with ``dist.all_gather_object``
    and reduced by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
out_dir (str): Path of directory to save output results.
        show (bool): whether to draw comparison figures.
        evaluate (bool): whether to calculate metrics.
Returns:
None.
"""
model.eval()
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
mkdir_or_exist(out_dir)
rs = RunningStatsWithBuffer(osp.join(out_dir, "stats.csv")) if evaluate else None
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, evaluate=evaluate, **data)
if out_dir:
img_metas = data["img_metas"][0].data[0]
for img_meta in img_metas:
out_file = osp.join(out_dir, img_meta["ori_filename"])
model.module.show_result(
img_meta["filename"],
result,
show=show,
out_file=out_file,
inp=data,
dataset={
k: v
for k, v in vars(dataset).items()
if isinstance(v, (int, float, tuple))
},
running_stats=rs,
)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
if evaluate:
output = [None for _ in range(world_size)]
dist.all_gather_object(output, rs)
if rank == 0:
rs = functools.reduce(lambda a, b: a + b, output)
print_log(
f"\n{rs.n} samples, mean {rs.mean}, std: {rs.std}",
logger=get_root_logger(),
)
rs.dump()
| CODD-main | apis/inference.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import math
import random
import cv2
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from mmseg.datasets import PIPELINES
@PIPELINES.register_module(force=True)
class RandomCrop(object):
"""Random crop the image & seg.
Args:
crop_size (tuple): Expected size after cropping, (h, w).
cat_max_ratio (float): The maximum ratio that single category could
occupy.
"""
def __init__(self, crop_size, cat_max_ratio=1.0, ignore_index=255):
assert crop_size[0] > 0 and crop_size[1] > 0
self.crop_size = crop_size
self.cat_max_ratio = cat_max_ratio
self.ignore_index = ignore_index
def get_crop_bbox(self, img):
"""Randomly get a crop bounding box."""
margin_h = max(img.shape[0] - self.crop_size[0], 0)
margin_w = max(img.shape[1] - self.crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]
return crop_y1, crop_y2, crop_x1, crop_x2
def crop(self, img, crop_bbox):
"""Crop from ``img``"""
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
return img
def __call__(self, results):
"""Call function to randomly crop images, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
img = results["img"]
crop_bbox = self.get_crop_bbox(img)
if self.cat_max_ratio < 1.0 and "gt_semantic_seg" in results:
# Repeat 10 times
for _ in range(10):
seg_temp = self.crop(results["gt_semantic_seg"], crop_bbox)
labels, cnt = np.unique(seg_temp, return_counts=True)
cnt = cnt[labels != self.ignore_index]
if len(cnt) > 1 and np.max(cnt) / np.sum(cnt) < self.cat_max_ratio:
break
crop_bbox = self.get_crop_bbox(img)
# crop the image
for key in results.get("img_fields", ["img"]):
img = self.crop(results[key], crop_bbox)
results[key] = img
results["img_shape"] = results["img"].shape
# crop annotations
for key in results.get("seg_fields", []):
results[key] = self.crop(results[key], crop_bbox)
# crop image and semantic seg for clips if present
if "img_list" in results:
new_img_list = []
img_list = results["img_list"]
for curr_img in img_list:
new_img_list.append(self.crop(curr_img, crop_bbox))
results["img_list"] = new_img_list
if "r_img_list" in results:
new_img_list = []
img_list = results["r_img_list"]
for curr_img in img_list:
new_img_list.append(self.crop(curr_img, crop_bbox))
results["r_img_list"] = new_img_list
for key in results.get("seg_fields", []):
key_list = key + "_list"
if key_list not in results:
continue
seg_list = results[key_list]
new_seg_list = []
for curr_seg in seg_list:
new_seg_list.append(self.crop(curr_seg, crop_bbox))
results[key_list] = new_seg_list
# crop intrinsics
if "intrinsics" in results and results["intrinsics"] is not None:
crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
new_intrinsics = results["intrinsics"]
new_intrinsics = [new_intrinsics[0], new_intrinsics[1], new_intrinsics[2] - crop_x1,
new_intrinsics[3] - crop_y1]
results["intrinsics"] = new_intrinsics
return results
def __repr__(self):
return self.__class__.__name__ + f"(crop_size={self.crop_size})"
@PIPELINES.register_module(force=True)
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (float, optional): Padding value. Default: 0.
seg_pad_val (float, optional): Padding value of segmentation map.
Default: 255.
"""
def __init__(
self,
size=None,
size_divisor=None,
pad_val=0,
seg_pad_val=255,
disp_pad_val=0,
flow_pad_val=210,
):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
self.seg_pad_val = seg_pad_val
self.disp_pad_val = disp_pad_val
self.flow_pad_val = flow_pad_val
# only one of size and size_divisor should be valid
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _get_pad_img(self, img):
if self.size is not None:
padded_img = mmcv.impad(
img, shape=self.size, padding_mode='reflect'
)
elif self.size_divisor is not None:
h, w = img.shape[:2]
size = [math.ceil(h / self.size_divisor) * self.size_divisor,
math.ceil(w / self.size_divisor) * self.size_divisor]
padded_img = mmcv.impad(
img, shape=size, padding_mode='reflect'
)
# padded_img = mmcv.impad_to_multiple(img, divisor=self.size_divisor, pad_val=self.pad_val)
return padded_img
def _pad_img(self, results):
"""Pad images according to ``self.size``."""
padded_img = self._get_pad_img(results["img"])
results["img"] = padded_img
results["pad_shape"] = padded_img.shape
results["pad_fixed_size"] = self.size
results["pad_size_divisor"] = self.size_divisor
if "img_list" in results:
curr_imgs = results['img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self._get_pad_img(curr_img))
results['img_list'] = new_list
def _pad_r_img(self, results):
"""Pad images according to ``self.size``."""
if "r_img" in results:
results["r_img"] = self._get_pad_img(results["r_img"])
if "r_img_list" in results:
curr_imgs = results['r_img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self._get_pad_img(curr_img))
results['r_img_list'] = new_list
def _pad_seg(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_semantic_seg" in results:
results["gt_semantic_seg"] = mmcv.impad(
results["gt_semantic_seg"],
shape=results["pad_shape"][:2],
pad_val=self.seg_pad_val,
)
if "gt_semantic_seg_list" in results:
curr_list = results["gt_semantic_seg_list"]
new_list = []
for curr_seg in curr_list:
new_list.append(mmcv.impad(
curr_seg, shape=results["pad_shape"][:2], pad_val=self.seg_pad_val
))
results['gt_semantic_seg_list'] = new_list
def _pad_disp(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp" in results:
results["gt_disp"] = mmcv.impad(
results["gt_disp"],
shape=results["pad_shape"][:2],
pad_val=self.disp_pad_val,
)
if "gt_disp_list" in results:
curr_list = results["gt_disp_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.disp_pad_val
))
results['gt_disp_list'] = new_list
def _pad_flow(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_flow" in results:
results["gt_flow"] = mmcv.impad(
results["gt_flow"],
shape=results["pad_shape"][:2],
pad_val=self.flow_pad_val,
)
if "gt_flow_list" in results:
curr_list = results["gt_flow_list"]
new_list = []
for curr_flow in curr_list:
new_list.append(mmcv.impad(
curr_flow, shape=results["pad_shape"][:2], pad_val=self.flow_pad_val
))
results['gt_flow_list'] = new_list
def _pad_disp_change(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp_change" in results:
results["gt_disp_change"] = mmcv.impad(
results["gt_disp_change"],
shape=results["pad_shape"][:2],
pad_val=self.flow_pad_val,
)
if "gt_disp_change_list" in results:
curr_list = results["gt_disp_change_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.flow_pad_val
))
results['gt_disp_change_list'] = new_list
def _pad_disp_2(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp_2" in results:
results["gt_disp_2"] = mmcv.impad(
results["gt_disp_2"],
shape=results["pad_shape"][:2],
pad_val=self.disp_pad_val,
)
if "gt_disp_2_list" in results:
curr_list = results["gt_disp_2_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.disp_pad_val
))
results['gt_disp_2_list'] = new_list
def _pad_flow_occ(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_flow_occ" in results:
results["gt_flow_occ"] = mmcv.impad(
results["gt_flow_occ"],
shape=results["pad_shape"][:2],
pad_val=self.seg_pad_val, # pad 255
)
if "gt_flow_occ_list" in results:
curr_list = results["gt_flow_occ_list"]
new_list = []
for curr_occ in curr_list:
new_list.append(mmcv.impad(
curr_occ, shape=results["pad_shape"][:2], pad_val=self.seg_pad_val, # pad 255
))
results['gt_flow_occ_list'] = new_list
def _pad_disp_occ(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp_occ" in results:
results["gt_disp_occ"] = mmcv.impad(
results["gt_disp_occ"],
shape=results["pad_shape"][:2],
pad_val=self.seg_pad_val, # pad 255
)
if "gt_disp_occ_list" in results:
curr_list = results["gt_disp_occ_list"]
new_list = []
for curr_occ in curr_list:
new_list.append(mmcv.impad(
curr_occ, shape=results["pad_shape"][:2], pad_val=self.seg_pad_val, # pad 255
))
results['gt_disp_occ_list'] = new_list
def _pad_disp2(self, results):
"""Pad masks according to ``results['pad_shape']``."""
if "gt_disp2" in results:
results["gt_disp2"] = mmcv.impad(
results["gt_disp2"],
shape=results["pad_shape"][:2],
pad_val=self.disp_pad_val,
)
if "gt_disp2_list" in results:
curr_list = results["gt_disp2_list"]
new_list = []
for curr_disp in curr_list:
new_list.append(mmcv.impad(
curr_disp, shape=results["pad_shape"][:2], pad_val=self.disp_pad_val
))
results['gt_disp2_list'] = new_list
def __call__(self, results):
"""Call function to pad images, masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
self._pad_img(results)
self._pad_seg(results)
self._pad_r_img(results)
self._pad_disp(results)
self._pad_flow(results)
self._pad_disp_change(results)
self._pad_disp_2(results)
self._pad_flow_occ(results)
self._pad_disp2(results)
self._pad_disp_occ(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f"(size={self.size}, size_divisor={self.size_divisor}, "
f"pad_val={self.pad_val})"
)
return repr_str
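# Example pipeline entries (values are illustrative assumptions):
#   dict(type='Pad', size_divisor=32)      # pad H/W up to a multiple of 32
#   dict(type='Pad', size=(544, 960))      # or pad to a fixed shape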
@PIPELINES.register_module(force=True)
class Normalize(object):
"""Normalize the image.
Added key is "img_norm_cfg".
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def img_norm(self, results, key):
if key in results:
results[key] = mmcv.imnormalize(
results[key], self.mean, self.std, self.to_rgb,
)
def imglist_norm(self, results, key):
if key in results:
curr_list = results[key]
new_list = []
for img in curr_list:
new_list.append(mmcv.imnormalize(img, self.mean, self.std, self.to_rgb))
results[key] = new_list
def __call__(self, results):
"""Call function to normalize images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Normalized results, 'img_norm_cfg' key is added into
result dict.
"""
self.img_norm(results, "img")
self.img_norm(results, "r_img")
self.imglist_norm(results, "img_list")
self.imglist_norm(results, "r_img_list")
results["img_norm_cfg"] = dict(mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"(mean={self.mean}, std={self.std}, to_rgb=" f"{self.to_rgb})"
return repr_str
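# Example pipeline entry (ImageNet statistics shown as an illustrative assumption):
#   dict(type='Normalize', mean=[123.675, 116.28, 103.53],
#        std=[58.395, 57.12, 57.375], to_rgb=True)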
@PIPELINES.register_module(force=True)
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last. If asymmetric augmentation is used, 0.5 probability
the augmentation will be asym.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
asym (bool): apply augmentation asymmetrically
"""
def __init__(
self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
asym=False,
):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
self.asym = asym
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, imgs):
"""Brightness distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
beta = np.random.uniform(-self.brightness_delta, self.brightness_delta)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
beta = np.random.uniform(-self.brightness_delta, self.brightness_delta)
new_imgs.append(self.convert(img, beta=beta))
imgs = new_imgs
return imgs
def contrast(self, imgs):
"""Contrast distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
alpha = np.random.uniform(self.contrast_lower, self.contrast_upper)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
alpha = np.random.uniform(self.contrast_lower, self.contrast_upper)
new_imgs.append(self.convert(img, alpha=alpha))
imgs = new_imgs
return imgs
def saturation(self, imgs):
"""Saturation distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
alpha = np.random.uniform(self.saturation_lower, self.saturation_upper)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
alpha = np.random.uniform(self.saturation_lower, self.saturation_upper)
img = mmcv.bgr2hsv(img)
img[:, :, 1] = self.convert(img[:, :, 1], alpha=alpha)
new_imgs.append(mmcv.hsv2bgr(img))
imgs = new_imgs
return imgs
def hue(self, imgs):
"""Hue distortion."""
p_aug = np.random.randint(2)
p_asym = np.random.randint(2)
if p_aug:
new_imgs = []
delta = np.random.randint(-self.hue_delta, self.hue_delta)
for idx, img in enumerate(imgs):
if self.asym and idx >= len(imgs) / 2 and p_asym: # asym prob for right image only
delta = np.random.randint(-self.hue_delta, self.hue_delta)
img = mmcv.bgr2hsv(img)
img[:, :, 0] = (img[:, :, 0].astype(int) + delta) % 180
new_imgs.append(mmcv.hsv2bgr(img))
imgs = new_imgs
return imgs
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
imgs = [results["img"]]
if "r_img" in results:
imgs.append(results["r_img"])
# random brightness
imgs = self.brightness(imgs)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = np.random.randint(2)
if "img_list" not in results:
if mode == 1:
imgs = self.contrast(imgs)
# random saturation
imgs = self.saturation(imgs)
# random hue
imgs = self.hue(imgs)
# random contrast
if mode == 0:
imgs = self.contrast(imgs)
results["img"] = imgs[0]
if "r_img" in results:
results["r_img"] = imgs[1]
elif "img_list" in results:
import copy
new_list = copy.copy(results["img_list"])
img_list_len = len(new_list)
if "r_img_list" in results:
new_list += results["r_img_list"]
if mode == 1:
new_list = self.contrast(new_list)
# random saturation
new_list = self.saturation(new_list)
# random hue
new_list = self.hue(new_list)
# random contrast
if mode == 0:
new_list = self.contrast(new_list)
results["img_list"] = new_list[:img_list_len]
if "r_img_list" in results:
results['r_img_list'] = new_list[img_list_len:]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f"(brightness_delta={self.brightness_delta}, "
f"contrast_range=({self.contrast_lower}, "
f"{self.contrast_upper}), "
f"saturation_range=({self.saturation_lower}, "
f"{self.saturation_upper}), "
f"hue_delta={self.hue_delta})"
)
return repr_str
@PIPELINES.register_module(force=True)
class StereoPhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last. If asymmetric augmentation is used, 0.5 probability
the augmentation will be asym.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
prob (float): apply augmentation
asym_prob (float): apply augmentation asymmetrically
"""
def __init__(
self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
prob=0.5,
asym_prob=0.5,
):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
self.prob = prob
self.asym_prob = asym_prob
def convert(self, img, alpha=1, beta=0):
"""Multiple with alpha and add beat with clip."""
img = img.astype(np.float32) * alpha + beta
img = np.clip(img, 0, 255)
return img.astype(np.uint8)
def brightness(self, imgs, r_imgs):
"""Brightness distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
beta = np.random.uniform(-self.brightness_delta, self.brightness_delta)
imgs[idx] = self.convert(img, beta=beta)
if p_asym:
beta = beta * (1 + np.random.uniform(-0.2, 0.2))
r_imgs[idx] = self.convert(r_img, beta=beta)
return imgs, r_imgs
def contrast(self, imgs, r_imgs):
"""Contrast distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
alpha = np.random.uniform(self.contrast_lower, self.contrast_upper)
imgs[idx] = self.convert(img, alpha=alpha)
if p_asym:
alpha = alpha * (1 + np.random.uniform(-0.2, 0.2))
r_imgs[idx] = self.convert(r_img, alpha=alpha)
return imgs, r_imgs
def saturation(self, imgs, r_imgs):
"""Saturation distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
alpha = np.random.uniform(self.saturation_lower, self.saturation_upper)
img = mmcv.bgr2hsv(img)
img[:, :, 1] = self.convert(img[:, :, 1], alpha=alpha)
imgs[idx] = mmcv.hsv2bgr(img)
if p_asym:
alpha = alpha * (1 + np.random.uniform(-0.2, 0.2))
r_img = mmcv.bgr2hsv(r_img)
r_img[:, :, 1] = self.convert(r_img[:, :, 1], alpha=alpha)
r_imgs[idx] = mmcv.hsv2bgr(r_img)
return imgs, r_imgs
def hue(self, imgs, r_imgs):
"""Hue distortion."""
for idx, (img, r_img) in enumerate(zip(imgs, r_imgs)):
p_aug = np.random.rand() < self.prob
p_asym = np.random.rand() < self.asym_prob
if p_aug:
delta = np.random.randint(-self.hue_delta, self.hue_delta)
img = mmcv.bgr2hsv(img)
img[:, :, 0] = (img[:, :, 0].astype(int) + delta) % 180
imgs[idx] = mmcv.hsv2bgr(img)
if p_asym:
delta = delta * (1 + np.random.uniform(-0.2, 0.2))
r_img = mmcv.bgr2hsv(r_img)
r_img[:, :, 0] = (r_img[:, :, 0].astype(int) + delta) % 180
r_imgs[idx] = mmcv.hsv2bgr(r_img)
return imgs, r_imgs
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
imgs = [results["img"]]
r_imgs = [results["r_img"]]
# random brightness
imgs, r_imgs = self.brightness(imgs, r_imgs)
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = np.random.randint(2)
if "img_list" not in results:
if mode == 1:
imgs, r_imgs = self.contrast(imgs, r_imgs)
# random saturation
imgs, r_imgs = self.saturation(imgs, r_imgs)
# random hue
imgs, r_imgs = self.hue(imgs, r_imgs)
# random contrast
if mode == 0:
imgs, r_imgs = self.contrast(imgs, r_imgs)
results["img"] = imgs[0]
results["r_img"] = r_imgs[0]
elif "img_list" in results:
import copy
new_list = copy.copy(results["img_list"])
r_new_list = results["r_img_list"]
if mode == 1:
new_list, r_new_list = self.contrast(new_list, r_new_list)
# random saturation
new_list, r_new_list = self.saturation(new_list, r_new_list)
# random hue
new_list, r_new_list = self.hue(new_list, r_new_list)
# random contrast
if mode == 0:
new_list, r_new_list = self.contrast(new_list, r_new_list)
results["img_list"] = new_list
results['r_img_list'] = r_new_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (
f"(brightness_delta={self.brightness_delta}, "
f"contrast_range=({self.contrast_lower}, "
f"{self.contrast_upper}), "
f"saturation_range=({self.saturation_lower}, "
f"{self.saturation_upper}), "
f"hue_delta={self.hue_delta})"
)
return repr_str
@PIPELINES.register_module()
class RandomShiftRotate(object):
"""Randomly apply vertical translate and rotate the input.
Args:
max_shift (float): maximum shift in pixels along vertical direction. Default: 1.5.
max_rotation (float): maximum rotation in degree. Default: 0.2.
        prob (float): probability of applying the transform. Default: 1.0.
Targets:
r_image, r_img_list
Image types:
uint8, float32
"""
def __init__(self, max_shift=1.5, max_rotation=0.2, prob=1.0):
self.max_shift = max_shift
self.max_rotation = max_rotation
self.prob = prob
def _shift_and_rotate(self, img):
if random.random() < self.prob:
px2 = random.uniform(-self.max_shift, self.max_shift)
angle2 = random.uniform(-self.max_rotation, self.max_rotation)
image_center = (np.random.uniform(0, img.shape[0]), \
np.random.uniform(0, img.shape[1]))
rot_mat = cv2.getRotationMatrix2D(image_center, angle2, 1.0)
img = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
trans_mat = np.float32([[1, 0, 0], [0, 1, px2]])
img = cv2.warpAffine(img, trans_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
return img
def __call__(self, results):
if "r_img" in results:
results["r_img"] = self._shift_and_rotate(results["r_img"])
if "r_img_list" in results:
curr_imgs = results['r_img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self._shift_and_rotate(curr_img))
results['r_img_list'] = new_list
return results
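# Example pipeline entry (values are illustrative assumptions):
#   dict(type='RandomShiftRotate', max_shift=1.5, max_rotation=0.2, prob=0.5)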
@PIPELINES.register_module()
class RandomOcclude(object):
"""Randomly apply occlusion.
Args:
        w_patch_range (tuple): min and max value of patch width.
        h_patch_range (tuple): min and max value of patch height.
        mode (str): how the occluding patch is filled; 'mean' fills with the source patch mean. Default: 'mean'.
        prob (float): probability of applying the transform. Default: 1.0.
Targets:
r_image, r_img_list
Image types:
uint8, float32
"""
def __init__(self, w_patch_range=(180, 250), h_patch_range=(50, 70), mode='mean', prob=1.0):
self.w_patch_range = w_patch_range
self.h_patch_range = h_patch_range
self.mode = mode
self.prob = prob
def apply(self, img, patch1, patch2):
patch1_yl, patch1_xl, patch1_yh, patch1_xh = patch1
patch2_yl, patch2_xl, patch2_yh, patch2_xh = patch2
img_patch = img[patch2_yl:patch2_yh, patch2_xl:patch2_xh]
if self.mode == 'mean':
img_patch = np.mean(np.mean(img_patch, 0), 0)[np.newaxis, np.newaxis]
img[patch1_yl:patch1_yh, patch1_xl:patch1_xh] = img_patch
return img
def __call__(self, results):
if random.random() < self.prob and "r_img" in results:
img_h, img_w, _ = results["r_img"].shape
patch_h = random.randint(*self.h_patch_range)
patch_w = random.randint(*self.w_patch_range)
patch1_y = random.randint(0, img_h - patch_h)
patch1_x = random.randint(0, img_w - patch_w)
patch2_y = random.randint(0, img_h - patch_h)
patch2_x = random.randint(0, img_w - patch_w)
patch1 = (patch1_y, patch1_x, patch1_y + patch_h, patch1_x + patch_w)
patch2 = (patch2_y, patch2_x, patch2_y + patch_h, patch2_x + patch_w)
if "r_img" in results:
results["r_img"] = self.apply(results["r_img"], patch1, patch2)
if "r_img_list" in results:
curr_imgs = results['r_img_list']
new_list = []
for curr_img in curr_imgs:
new_list.append(self.apply(curr_img, patch1, patch2))
results['r_img_list'] = new_list
return results
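# Example pipeline entry (values are illustrative assumptions):
#   dict(type='RandomOcclude', w_patch_range=(180, 250), h_patch_range=(50, 70),
#        mode='mean', prob=0.5)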
| CODD-main | datasets/transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import copy
import os.path as osp
import re
import sys
import mmcv
import numpy as np
from mmcv.utils import print_log
from mmseg.datasets import DATASETS, CustomDataset
from mmseg.datasets.pipelines import Compose
from mmseg.utils import get_root_logger
from terminaltables import AsciiTable
from tqdm import tqdm
from utils import AverageMeter
sys.setrecursionlimit(
100000
) # NOTE: increase recursion limit to avoid "RuntimeError: maximum recursion depth exceeded while calling a Python object"
MF_MAX_SEQUENCE_LENGTH = 50
@DATASETS.register_module()
class CustomStereoMultiFrameDataset(CustomDataset):
def __init__(
self,
pipeline,
img_dir,
test_mode=False,
disp_range=(1, 210),
calib=None,
depth_range=None,
img_suffix=".png",
r_img_dir=None,
r_img_suffix=".png",
disp_dir=None,
disp_suffix=".exr",
split=None,
data_root=None,
flow_dir=None,
flow_suffix=".exr",
disp_change_dir=None,
disp_change_suffix=".exr",
flow_occ_dir=None,
flow_occ_suffix=".exr",
disp2_dir=None,
disp2_suffix=".exr",
disp_occ_dir=None,
disp_occ_suffix=".exr",
prefix_pattern="",
intrinsics=None,
num_samples=None,
**kwargs,
):
"""custom dataset for temporal stereo
Args:
pipeline (dict): pipeline for reading
img_dir (str): image directory
disp_range (tuple, optional): valid disparity range. Defaults to (1, 210).
calib (float, optional): baseline * focal length, for converting disparity to depth. Defaults to None.
depth_range (tuple, optional): valid depth range, need calib. Defaults to None.
img_suffix (str, optional): Defaults to ".png".
r_img_dir (str, optional): right image directory. Defaults to None.
r_img_suffix (str, optional): Defaults to ".png".
disp_dir (str, optional): disparity directory. Defaults to None.
disp_suffix (str, optional): Defaults to ".exr".
split (str, optional): path to split file. Defaults to None.
data_root (str, optional): prepend path to image data. Defaults to None.
flow_dir (str, optional): optical flow directory. Defaults to None.
flow_suffix (str, optional): Defaults to ".exr".
disp_change_dir (str, optional): disparity change directory. Defaults to None.
disp_change_suffix (str, optional): Defaults to ".exr".
flow_occ_dir (str, optional): optical flow occlusion directory, used to compute disparity change for Sintel and TartanAir. Defaults to None.
flow_occ_suffix (str, optional): Defaults to ".exr".
disp2_dir (str, optional): disparity of next frame in current frame directory, used to compute disparity change for KITTI Depth. Defaults to None.
disp2_suffix (str, optional): Defaults to ".exr".
disp_occ_dir (str, optional): disparity occlusion directory. Defaults to None.
disp_occ_suffix (str, optional): Defaults to ".exr".
prefix_pattern (str, optional): prefix pattern to determine if frames belong to the same sequence. Defaults to "".
intrinsics (list, optional): intrinsics, fx, fy, cx, cy. Defaults to None.
            num_samples (int, optional): number of samples to use. Defaults to None.
"""
self.pipeline = Compose(pipeline)
self.img_dir = img_dir
self.img_suffix = img_suffix
self.r_img_dir = r_img_dir
self.r_img_suffix = r_img_suffix
self.disp_dir = disp_dir
self.disp_suffix = disp_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.disp_range = disp_range
self.calib = calib
self.depth_range = depth_range
self.intrinsics = intrinsics
self.prefix_pattern = prefix_pattern
self.flow_dir = flow_dir
self.flow_suffix = flow_suffix
self.disp_change_dir = disp_change_dir
self.disp_change_suffix = disp_change_suffix
self.flow_occ_dir = flow_occ_dir
self.flow_occ_suffix = flow_occ_suffix
self.disp2_dir = disp2_dir
self.disp2_suffix = disp2_suffix
self.disp_occ_dir = disp_occ_dir
self.disp_occ_suffix = disp_occ_suffix
if self.depth_range is not None:
assert (
self.calib is not None
), "calib is required to convert disparity to depth"
self.num_frames = kwargs.get("num_frames", 2)
if "num_frames" in kwargs:
kwargs.pop("num_frames")
# join paths if data_root is specified
if self.data_root is not None:
if not mmcv.isabs(self.img_dir):
self.img_dir = osp.join(self.data_root, self.img_dir)
if not (self.ann_dir is None or mmcv.isabs(self.ann_dir)):
self.ann_dir = osp.join(self.data_root, self.ann_dir)
if not (self.r_img_dir is None or mmcv.isabs(self.r_img_dir)):
self.r_img_dir = osp.join(self.data_root, self.r_img_dir)
if not (self.disp_dir is None or mmcv.isabs(self.disp_dir)):
self.disp_dir = osp.join(self.data_root, self.disp_dir)
if not (self.split is None or mmcv.isabs(self.split)):
self.split = osp.join(self.data_root, self.split)
# load annotations
self.img_infos = self.load_annotations(
self.img_dir,
self.img_suffix,
None,
None,
self.r_img_dir,
self.r_img_suffix,
self.disp_dir,
self.disp_suffix,
self.split,
num_samples,
)
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results["img_fields"] = []
results["seg_fields"] = []
results["img_prefix"] = self.img_dir
results["seg_prefix"] = []
results["r_img_prefix"] = self.r_img_dir
results["disp_prefix"] = self.disp_dir
results["flow_prefix"] = self.flow_dir
results["disp_change_prefix"] = self.disp_change_dir
results["flow_occ_prefix"] = self.flow_occ_dir
results["disp2_prefix"] = self.disp2_dir
results["disp_occ_prefix"] = self.disp_occ_dir
# used in evaluation
results["calib"] = self.calib
results["disp_range"] = self.disp_range
results["depth_range"] = self.depth_range
results["intrinsics"] = self.intrinsics
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
            dict: Testing data after pipeline with new keys introduced by
                the pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def update_mf_history(self, history, new_entry, num_frames, pattern="_[^_]*$"):
if num_frames > 0:
if len(history) == 0:
history.append(new_entry)
else:
first_entry_name = history[0]["filename"]
first_entry_prefix = re.sub(pattern, "", first_entry_name)
new_entry_name = new_entry["filename"]
new_entry_prefix = re.sub(pattern, "", new_entry_name)
if first_entry_prefix == new_entry_prefix:
history.append(new_entry)
else:
history = [new_entry]
assert len(history) <= num_frames, "History cannot be longer than MF"
if len(history) == num_frames:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
history.pop(0)
return first_entry, history
else:
return None, history
        else:  # this is written for testing, where we read in the whole video sequence when num_frames=-1
if len(history) == 0:
history.append(new_entry)
else: # read all frames from same sequence
first_entry_name = history[0]["filename"]
first_entry_prefix = re.sub(pattern, "", first_entry_name)
new_entry_name = new_entry["filename"]
new_entry_prefix = re.sub(pattern, "", new_entry_name)
# a new sequence starts or reaching max len
if len(history) >= MF_MAX_SEQUENCE_LENGTH or first_entry_prefix != new_entry_prefix:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
history = [new_entry]
return first_entry, history
else:
history.append(new_entry)
return None, history
def load_annotations(
self,
img_dir,
img_suffix,
ann_dir,
seg_map_suffix,
r_img_dir,
r_img_suffix,
disp_dir,
disp_suffix,
split,
num_samples,
):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
r_img_dir (str|None): Path to right image directory.
r_img_suffix (str|None): Suffix of right images.
            disp_dir (str|None): Path to disparity map directory.
disp_suffix (str|None): Suffix of disparity maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
history = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_info = dict(filename=img_name + img_suffix)
if r_img_dir is not None:
img_info["r_filename"] = img_name + r_img_suffix
img_info["ann"] = dict()
if ann_dir is not None:
seg_map = img_name + seg_map_suffix
img_info["ann"]["seg_map"] = seg_map
if disp_dir is not None:
disp = img_name + disp_suffix
img_info["ann"]["disp"] = disp
if not img_info["ann"]:
del img_info["ann"]
first_img_info, history = self.update_mf_history(
history, img_info, self.num_frames, pattern=self.prefix_pattern
)
if first_img_info is not None:
img_infos.append(first_img_info)
# add last sequence when testing
if self.num_frames <= 0:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
img_infos.append(first_entry)
else:
all_files = mmcv.scandir(img_dir, img_suffix, recursive=True)
all_files = sorted(all_files)
for img in all_files:
img_info = dict(filename=img)
if r_img_dir is not None:
img_info["r_filename"] = img.replace(
img_suffix, r_img_suffix
).replace("left", "right")
img_info["ann"] = dict()
first_img_info, history = self.update_mf_history(
history, img_info, self.num_frames, pattern=self.prefix_pattern
)
if first_img_info is not None:
img_infos.append(first_img_info)
# add last sequence when testing
if self.num_frames <= 0:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
img_infos.append(first_entry)
if (
num_samples is not None
and 0 < num_samples <= len(img_infos)
):
img_infos = img_infos[:num_samples]
print_log(f"Loaded {len(img_infos)} images", logger=get_root_logger())
return img_infos
def evaluate_disp(self, results, logger):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
# disp metric
epe_meter = AverageMeter()
th3_meter = AverageMeter()
# temporal metric
t_epe_meter = AverageMeter()
th3_tepe_meter = AverageMeter()
t_epe_rel_meter = AverageMeter()
th1_teper_meter = AverageMeter()
# flow mag metric
flow_mag_meter = AverageMeter()
for _, result in tqdm(enumerate(results)):
epe_meter.update(result['epe'].item())
th3_meter.update(result['th3'].item())
t_epe_meter.update(result['tepe'].item())
th3_tepe_meter.update(result['th3_tepe'].item())
t_epe_rel_meter.update(result['tepe_rel'].item())
th1_teper_meter.update(result['th1_tepe_rel'].item())
flow_mag_meter.update(result['flow_mag'].item())
# depth summary table
summary_table_content = [
("epe", epe_meter, 1),
("th3", th3_meter, 1),
("tepe", t_epe_meter, 1),
("th3_tepe", th3_tepe_meter, 1),
("tepe_rel", t_epe_rel_meter, 1),
("th1_tepe_rel", th1_teper_meter, 1),
("flow_mag", flow_mag_meter, 1),
]
header = [k[0] for k in summary_table_content]
summary_row = [np.round(k[1].avg * k[2], 3) for k in summary_table_content]
summary_table_data = [header, summary_row]
print_log("Summary:", logger)
table = AsciiTable(summary_table_data)
print_log("\n" + table.table, logger=logger)
eval_results = {}
for i in range(len(summary_table_data[0])):
eval_results[summary_table_data[0][i].split(" ")[0]] = summary_table_data[1][i]
return eval_results
def evaluate_motion(self, results, logger, start_idx=7):
count_all = 0
metrics_all = {
"epe2d_scene_flow": 0.0,
"epe2d_optical_flow": 0.0,
"1px_scene_flow": 0.0,
"1px_optical_flow": 0.0,
}
for _, result in tqdm(enumerate(results)):
count_all += result["count"].item()
metrics_all["epe2d_scene_flow"] += result["epe2d_scene_flow"].item()
metrics_all["epe2d_optical_flow"] += result["epe2d_optical_flow"].item()
metrics_all["1px_scene_flow"] += result["1px_scene_flow"].item()
metrics_all["1px_optical_flow"] += result["1px_optical_flow"].item()
# depth summary table
if count_all <= 0.0:
count_all = 1.0
summary_table_content = [
("epe2d_scene_flow", metrics_all["epe2d_scene_flow"], 1.0 / count_all),
("epe2d_optical_flow", metrics_all["epe2d_optical_flow"], 1.0 / count_all),
("1px_scene_flow", metrics_all["1px_scene_flow"], 1.0 / count_all),
("1px_optical_flow", metrics_all["1px_optical_flow"], 1.0 / count_all),
]
header = [k[0] for k in summary_table_content]
summary_row = [np.round(k[1] * k[2], 3) for k in summary_table_content]
summary_table_data = [header, summary_row]
print_log("Summary:", logger)
table = AsciiTable(summary_table_data)
print_log("\n" + table.table, logger=logger)
eval_results = {}
for i in range(len(summary_table_data[0])):
eval_results[summary_table_data[0][i].split(" ")[0]] = summary_table_data[1][i]
return eval_results
def evaluate(self, results, metric="default", logger=None, **kwargs):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | None | str): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str, float]: Default metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ["default", "disp_only", "motion_only"]
if metric not in allowed_metrics:
raise KeyError("metric {} is not supported".format(metric))
if metric == "disp_only":
return self.evaluate_disp(results, logger)
elif metric == "motion_only":
return self.evaluate_motion(results, logger)
elif metric == "default":
eval_results = self.evaluate_disp(results, logger)
eval_results.update(self.evaluate_motion(results, logger))
return eval_results
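# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original CODD code): how
# ``update_mf_history`` above groups consecutive frames into clips. The
# filenames and the ``dataset`` instance below are hypothetical.
# ---------------------------------------------------------------------------
# history = []
# for name in ["seq1_000.png", "seq1_001.png", "seq2_000.png"]:
#     first, history = dataset.update_mf_history(history, dict(filename=name), num_frames=2)
#     # After "seq1_001.png", ``first`` is the "seq1_000.png" entry with
#     # first["mf"] holding both seq1 frames; "seq2_000.png" has a different
#     # prefix under the default pattern "_[^_]*$", so it starts a new history.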
| CODD-main | datasets/custom_stereo_mf.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from mmseg.datasets import DATASETS
from .scene_flow import SceneFlowMultiFrameDataset
@DATASETS.register_module()
class TartanAirMultiFrameDataset(SceneFlowMultiFrameDataset):
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".npy",
flow_suffix=".npy",
flow_occ_suffix=".npy",
prefix_pattern=r"\d+_left.png",
**kwargs,
)
| CODD-main | datasets/tartanair.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import copy
from mmcv.utils import print_log
from mmseg.datasets import DATASETS
from mmseg.utils import get_root_logger
from .custom_stereo_mf import CustomStereoMultiFrameDataset
@DATASETS.register_module()
class SceneFlowMultiFrameDataset(CustomStereoMultiFrameDataset):
"""Person dataset.
In segmentation map annotation for ADE20K, 0 stands for background, which
is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
'.png'.
"""
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".pfm",
flow_suffix=".pfm",
disp_change_suffix=".pfm",
disp_occ_suffix=".png",
prefix_pattern=r"\d+.png",
**kwargs,
)
def load_annotations(
self,
img_dir,
img_suffix,
ann_dir,
seg_map_suffix,
r_img_dir,
r_img_suffix,
disp_dir,
disp_suffix,
split,
num_samples,
):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
r_img_dir (str|None): Path to right image directory.
r_img_suffix (str|None): Suffix of right images.
            disp_dir (str|None): Path to disparity map directory.
disp_suffix (str|None): Suffix of disparity maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
history = []
if split is not None:
with open(split) as f:
for line in f:
filenames = line.strip().split()
ann = dict(disp=filenames[2])
if len(filenames) > 3:
ann["flow"] = filenames[3]
if len(filenames) > 4:
ann["disp_change"] = filenames[4]
if len(filenames) > 5:
ann["flow_occ"] = filenames[5]
if len(filenames) > 6:
ann["disp2"] = filenames[6]
if len(filenames) > 7:
ann["disp_occ"] = filenames[7]
img_info = dict(
filename=filenames[0], r_filename=filenames[1], ann=ann
)
first_img_info, history = self.update_mf_history(
history, img_info, self.num_frames, pattern=self.prefix_pattern
)
if first_img_info is not None:
img_infos.append(first_img_info)
# add last sequence when testing
if self.num_frames <= 0:
curr_history = copy.copy(history)
first_entry = curr_history[0]
first_entry["mf"] = curr_history
img_infos.append(first_entry)
else:
raise AssertionError("Multi frame dataloader needs split")
if (
num_samples is not None
and 0 < num_samples <= len(img_infos)
):
img_infos = img_infos[:num_samples]
print_log(f"Loaded {len(img_infos)} images", logger=get_root_logger())
return img_infos
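# ---------------------------------------------------------------------------
# Editor's illustrative note (not part of the original file): each line of the
# split file parsed above lists, space separated and in order,
#   left_image right_image disparity [flow] [disp_change] [flow_occ] [disp2] [disp_occ]
# Only the columns that are present are added to each frame's ``ann`` dict;
# some split tools write the literal token "None" as a placeholder.
# ---------------------------------------------------------------------------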
| CODD-main | datasets/scene_flow.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import re
import mmcv
# Requirements: Numpy and PIL/Pillow
import numpy as np
from PIL import Image
# sintel
# Check for endianness, based on Daniel Scharstein's optical flow code.
# Using little-endian architecture, these two should be equal.
TAG_FLOAT = 202021.25
TAG_CHAR = b'PIEH'  # stored as bytes so the binary writers below work under Python 3
### Sintel
def flow_read(filename):
""" Read optical flow from file, return (U,V) tuple.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
f = open(filename, 'rb')
check = np.fromfile(f, dtype=np.float32, count=1)[0]
assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(
TAG_FLOAT, check)
width = np.fromfile(f, dtype=np.int32, count=1)[0]
height = np.fromfile(f, dtype=np.int32, count=1)[0]
size = width * height
assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(
width, height)
tmp = np.fromfile(f, dtype=np.float32, count=-1).reshape((height, width * 2))
u = tmp[:, np.arange(width) * 2]
v = tmp[:, np.arange(width) * 2 + 1]
return u, v
def flow_write(filename, uv, v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert (uv.ndim == 3)
assert (uv.shape[2] == 2)
u = uv[:, :, 0]
v = uv[:, :, 1]
else:
u = uv
assert (u.shape == v.shape)
height, width = u.shape
f = open(filename, 'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width * nBands))
tmp[:, np.arange(width) * 2] = u
tmp[:, np.arange(width) * 2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
def depth_read(filename):
""" Read depth data from file, return as numpy array. """
f = open(filename, 'rb')
check = np.fromfile(f, dtype=np.float32, count=1)[0]
assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(
TAG_FLOAT, check)
width = np.fromfile(f, dtype=np.int32, count=1)[0]
height = np.fromfile(f, dtype=np.int32, count=1)[0]
size = width * height
assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(
width, height)
depth = np.fromfile(f, dtype=np.float32, count=-1).reshape((height, width))
return depth
def depth_write(filename, depth):
""" Write depth to file. """
height, width = depth.shape[:2]
f = open(filename, 'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
depth.astype(np.float32).tofile(f)
f.close()
def disparity_write(filename, disparity, bitdepth=16):
""" Write disparity to file.
bitdepth can be either 16 (default) or 32.
The maximum disparity is 1024, since the image width in Sintel
is 1024.
"""
d = disparity.copy()
# Clip disparity.
d[d > 1024] = 1024
d[d < 0] = 0
d_r = (d / 4.0).astype('uint8')
d_g = ((d * (2.0 ** 6)) % 256).astype('uint8')
out = np.zeros((d.shape[0], d.shape[1], 3), dtype='uint8')
out[:, :, 0] = d_r
out[:, :, 1] = d_g
if bitdepth > 16:
d_b = (d * (2 ** 14) % 256).astype('uint8')
out[:, :, 2] = d_b
Image.fromarray(out, 'RGB').save(filename, 'PNG')
def disparity_read(filename):
""" Return disparity read from filename. """
f_in = np.array(Image.open(filename))
d_r = f_in[:, :, 0].astype('float64')
d_g = f_in[:, :, 1].astype('float64')
d_b = f_in[:, :, 2].astype('float64')
depth = d_r * 4 + d_g / (2 ** 6) + d_b / (2 ** 14)
return depth
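# [Editor's illustrative sketch, not part of the original file.] A tiny helper
# mirroring the Sintel disparity PNG encoding used by disparity_write /
# disparity_read above; the test value ``d`` is hypothetical.
def _example_sintel_disparity_encoding(d=12.34):
    d = np.array([[d]], dtype='float64')
    d_r = (d / 4.0).astype('uint8')                   # coarse: 4 px steps
    d_g = ((d * (2.0 ** 6)) % 256).astype('uint8')    # fine: 1/64 px steps
    d_b = ((d * (2 ** 14)) % 256).astype('uint8')     # finest: 1/16384 px steps
    recon = d_r * 4 + d_g / (2 ** 6) + d_b / (2 ** 14)
    return recon  # approximately equal to d (to within about 1/16384 px)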
# def cam_read(filename):
# """ Read camera data, return (M,N) tuple.
#
# M is the intrinsic matrix, N is the extrinsic matrix, so that
#
# x = M*N*X,
# with x being a point in homogeneous image pixel coordinates, X being a
# point in homogeneous world coordinates.
# """
# txtdata = np.loadtxt(filename)
# intrinsic = txtdata[0,:9].reshape((3,3))
# extrinsic = textdata[1,:12].reshape((3,4))
# return intrinsic,extrinsic
#
#
# def cam_write(filename,M,N):
# """ Write intrinsic matrix M and extrinsic matrix N to file. """
# Z = np.zeros((2,12))
# Z[0,:9] = M.ravel()
# Z[1,:12] = N.ravel()
# np.savetxt(filename,Z)
def cam_read(filename):
""" Read camera data, return (M,N) tuple.
M is the intrinsic matrix, N is the extrinsic matrix, so that
x = M*N*X,
with x being a point in homogeneous image pixel coordinates, X being a
point in homogeneous world coordinates.
"""
f = open(filename, 'rb')
check = np.fromfile(f, dtype=np.float32, count=1)[0]
assert check == TAG_FLOAT, ' cam_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(
TAG_FLOAT, check)
M = np.fromfile(f, dtype='float64', count=9).reshape((3, 3))
N = np.fromfile(f, dtype='float64', count=12).reshape((3, 4))
return M, N
def cam_write(filename, M, N):
""" Write intrinsic matrix M and extrinsic matrix N to file. """
f = open(filename, 'wb')
# write the header
f.write(TAG_CHAR)
M.astype('float64').tofile(f)
N.astype('float64').tofile(f)
f.close()
def segmentation_write(filename, segmentation):
""" Write segmentation to file. """
segmentation_ = segmentation.astype('int32')
seg_r = np.floor(segmentation_ / (256 ** 2)).astype('uint8')
seg_g = np.floor((segmentation_ % (256 ** 2)) / 256).astype('uint8')
seg_b = np.floor(segmentation_ % 256).astype('uint8')
out = np.zeros((segmentation.shape[0], segmentation.shape[1], 3), dtype='uint8')
out[:, :, 0] = seg_r
out[:, :, 1] = seg_g
out[:, :, 2] = seg_b
Image.fromarray(out, 'RGB').save(filename, 'PNG')
def segmentation_read(filename):
""" Return disparity read from filename. """
f_in = np.array(Image.open(filename))
seg_r = f_in[:, :, 0].astype('int32')
seg_g = f_in[:, :, 1].astype('int32')
seg_b = f_in[:, :, 2].astype('int32')
segmentation = (seg_r * 256 + seg_g) * 256 + seg_b
return segmentation
### Others
def read_numpy_tartanair(path, channel=None):
    data = np.load(path).astype(np.float32)
    if channel is not None:  # keep only the first `channel` channels (e.g. 2 for optical flow)
        data = data[..., :channel]
    return np.array(data)
def read_numpy_tartanair_uint8(path):
data = np.load(path).astype(np.uint8)
return np.array(data)
def read_kitti_disp(img_bytes):
disp = (mmcv.imfrombytes(img_bytes, flag="unchanged", backend="cv2").squeeze()) / 256.0
return disp
def read_kitti_flow(img_bytes):
flow = mmcv.imfrombytes(img_bytes, flag="unchanged", backend="cv2")
flow = flow[:, :, ::-1].astype(np.float32)
flow, valid = flow[:, :, :2], flow[:, :, 2]
flow = (flow - 2 ** 15) / 64.0
return flow, valid
def read_pfm(path):
"""Read pfm file.
Args:
path (str): path to file
Returns:
tuple: (data, scale)
"""
with open(path, "rb") as file:
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if header.decode("ascii") == "PF":
color = True
elif header.decode("ascii") == "Pf":
color = False
else:
raise Exception("Not a PFM file: " + path)
dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
if dim_match:
width, height = list(map(int, dim_match.groups()))
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().decode("ascii").rstrip())
if scale < 0:
# little-endian
endian = "<"
scale = -scale
else:
# big-endian
endian = ">"
data = np.frombuffer(file.read(), endian + "f")
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
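# [Editor's illustrative sketch, not part of the original file.] A minimal
# round-trip check of the .flo writer/reader above; it only runs when this
# module is executed directly, and the temporary path is hypothetical.
if __name__ == "__main__":
    import os
    import tempfile
    _uv = np.random.rand(4, 5, 2).astype(np.float32)
    _path = os.path.join(tempfile.gettempdir(), "_codd_flow_example.flo")
    flow_write(_path, _uv)
    _u, _v = flow_read(_path)
    assert np.allclose(_u, _uv[:, :, 0]) and np.allclose(_v, _uv[:, :, 1])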
| CODD-main | datasets/data_io.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .formating import DefaultFormatBundle # NOQA
from .loading_stereo import * # NOQA
from .custom_stereo_mf import CustomStereoMultiFrameDataset # NOQA
from .kitti_depth import Kitti2015MultiFrameDataset, KittiDepthMultiFrameDataset # NOQA
from .scene_flow import SceneFlowMultiFrameDataset # NOQA
from .sintel import SintelMultiFrameDataset # NOQA
from .tartanair import TartanAirMultiFrameDataset # NOQA
from .transforms import (
RandomCrop,
Pad,
PhotoMetricDistortion,
StereoPhotoMetricDistortion
) # NOQA
__all__ = [k for k in globals().keys() if not k.startswith("_")]
| CODD-main | datasets/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from mmseg.datasets import DATASETS
from .scene_flow import SceneFlowMultiFrameDataset
@DATASETS.register_module()
class SintelMultiFrameDataset(SceneFlowMultiFrameDataset):
"""Person dataset.
In segmentation map annotation for ADE20K, 0 stands for background, which
is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
'.png'.
"""
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".png",
flow_suffix=".flo",
flow_occ_suffix=".png",
prefix_pattern="frame.*",
**kwargs,
)
| CODD-main | datasets/sintel.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os.path as osp
import mmcv
import numpy as np
from mmseg.datasets import PIPELINES
from mmseg.datasets.pipelines import LoadImageFromFile
from .data_io import disparity_read, flow_read, read_numpy_tartanair, read_numpy_tartanair_uint8, read_kitti_disp, \
read_kitti_flow, read_pfm
BF_DEFAULT = 210.0
@PIPELINES.register_module(force=True)
class LoadImagesFromFile(object):
"""Load an image from file.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
"""
def __init__(self,
to_float32=False,
color_type='color',
file_client_args=dict(backend='disk'),
imdecode_backend='cv2'):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get('img_prefix') is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_fields'].append('img')
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
        # Add the subsequent frames stored under the "mf" key
if "mf" not in results['img_info']:
results["img_list"] = [img]
else:
img_list = []
imginfolist = results['img_info']['mf']
for curr_imginfo in imginfolist:
if results.get('img_prefix') is not None:
filename = osp.join(results['img_prefix'], curr_imginfo['filename'])
else:
filename = curr_imginfo['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend)
if self.to_float32:
img = img.astype(np.float32)
img_list.append(img)
results['img_list'] = img_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(to_float32={self.to_float32},'
repr_str += f"color_type='{self.color_type}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
return repr_str
@PIPELINES.register_module()
class LoadRImagesFromFile(LoadImageFromFile):
"""Load an image from file.
Required keys are "r_img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
numpy array. If set to False, the loaded image is an uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
"""
def __init__(self, calib=1.0, **kwargs):
super(LoadRImagesFromFile, self).__init__(**kwargs)
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get("r_img_prefix") is not None:
filename = osp.join(
results["r_img_prefix"], results["img_info"]["r_filename"]
)
else:
filename = results["img_info"]["r_filename"]
img_bytes = self.file_client.get(filename)
r_img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend
)
if self.to_float32:
r_img = r_img.astype(np.float32)
results["r_img"] = r_img
results["img_fields"].append("r_img")
# Loading information about subsequent frames
if "mf" not in results['img_info']:
results['r_img_list'] = [r_img]
else:
img_list = []
imginfolist = results['img_info']['mf']
for curr_imginfo in imginfolist:
if results.get("r_img_prefix") is not None:
filename = osp.join(
results["r_img_prefix"], curr_imginfo["r_filename"]
)
else:
filename = curr_imginfo["r_filename"]
img_bytes = self.file_client.get(filename)
r_img = mmcv.imfrombytes(
img_bytes, flag=self.color_type, backend=self.imdecode_backend
)
if self.to_float32:
r_img = r_img.astype(np.float32)
img_list.append(r_img)
results['r_img_list'] = img_list
return results
@PIPELINES.register_module()
class LoadDispAnnotations(object):
"""Load annotations for disparity/depth prediction.
Args:
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
key (str): "disp" or "sparse_disp"
is_reciprocal (bool)
"""
def __init__(
self,
file_client_args=dict(backend="disk"),
imdecode_backend="cv2",
calib=None,
key="disp",
is_reciprocal=False,
):
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.key = key
self.is_reciprocal = is_reciprocal
        self.calib = calib  # baseline * focal length
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], results["ann_info"][self.key]
)
else:
filename = results["ann_info"][self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_disp = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "sintel":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
gt_disp = disparity_read(filename)
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_disp = read_numpy_tartanair(filename)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_disp = np.zeros_like(results["r_img"])[..., 0]
else:
img_bytes = self.file_client.get(filename)
gt_disp = read_kitti_disp(img_bytes)
else:
img_bytes = self.file_client.get(filename)
gt_disp = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_disp.ndim == 3:
gt_disp = gt_disp[:, :, -1]
gt_disp[gt_disp == np.inf] = BF_DEFAULT # set to large number to be filtered out
gt_disp[np.isnan(gt_disp)] = BF_DEFAULT
gt_disp = gt_disp.astype(np.float32)
if self.is_reciprocal:
gt_disp = 1 / gt_disp
if self.calib is not None:
gt_disp = self.calib * gt_disp
results["gt_" + self.key] = gt_disp
results["seg_fields"].append("gt_" + self.key)
# Add information about the frames in the clip if present
if "img_info" in results and "mf" in results["img_info"]:
imginfo_list = results["img_info"]["mf"]
disp_list = []
for curr_imginfo in imginfo_list:
curr_anninfo = curr_imginfo["ann"]
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], curr_anninfo[self.key]
)
else:
filename = curr_anninfo[self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_disp = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_disp = read_numpy_tartanair(filename)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_disp = np.zeros_like(results["r_img"])[..., 0]
else:
img_bytes = self.file_client.get(filename)
gt_disp = read_kitti_disp(img_bytes)
else:
img_bytes = self.file_client.get(filename)
gt_disp = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_disp.ndim == 3:
gt_disp = gt_disp[:, :, -1]
gt_disp[gt_disp == np.inf] = BF_DEFAULT # set to large number to be filtered out
gt_disp[np.isnan(gt_disp)] = BF_DEFAULT
gt_disp = gt_disp.astype(np.float32)
if self.is_reciprocal:
gt_disp = 1 / gt_disp
if self.calib is not None:
gt_disp = self.calib * gt_disp
disp_list.append(gt_disp)
results["gt_" + self.key + "_list"] = disp_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"key='{self.key}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
repr_str += f"is_reciprocal={self.is_reciprocal},"
return repr_str
@PIPELINES.register_module()
class LoadOpticalFlowAnnotations(object):
"""Load annotations for optical flow prediction.
Args:
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
'cv2'
key (str): "opt"
"""
def __init__(
self,
file_client_args=dict(backend="disk"),
imdecode_backend="cv2",
key="flow"
):
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.key = key
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], results["ann_info"][self.key]
)
else:
filename = results["ann_info"][self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_flow = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_flow = read_numpy_tartanair(filename, channel=2)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_flow = np.ones_like(results["r_img"])[..., :2]
gt_flow = gt_flow * BF_DEFAULT
else:
img_bytes = self.file_client.get(filename)
gt_flow, valid = read_kitti_flow(img_bytes)
valid = np.tile(valid[..., None], (1, 1, 2)).astype(bool)
gt_flow[~valid] = BF_DEFAULT
gt_flow = gt_flow.astype(np.float32)
else:
img_bytes = self.file_client.get(filename)
gt_flow = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_flow.ndim == 3:
gt_flow = gt_flow[:, :, :2]
        gt_flow[gt_flow == np.inf] = BF_DEFAULT  # set to a large value to be filtered out
gt_flow[np.isnan(gt_flow)] = BF_DEFAULT
gt_flow = gt_flow.astype(np.float32)
results["gt_" + self.key] = gt_flow
results["seg_fields"].append("gt_" + self.key)
# Add information about the frames in the clip if present
if "mf" in results["img_info"]:
imginfo_list = results["img_info"]["mf"]
opt_list = []
for curr_imginfo in imginfo_list:
curr_anninfo = curr_imginfo["ann"]
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], curr_anninfo[self.key]
)
else:
filename = curr_anninfo[self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_flow = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_flow = read_numpy_tartanair(filename, channel=2)
elif self.imdecode_backend == "kitti":
assert osp.splitext(filename)[1] == ".png", "Only support .png format"
if "None.png" in filename:
gt_flow = np.ones_like(results["r_img"])[..., :2]
gt_flow = gt_flow * BF_DEFAULT
else:
img_bytes = self.file_client.get(filename)
gt_flow, valid = read_kitti_flow(img_bytes)
valid = np.tile(valid[..., None], (1, 1, 2)).astype(bool)
gt_flow[~valid] = BF_DEFAULT
gt_flow = gt_flow.astype(np.float32)
else:
img_bytes = self.file_client.get(filename)
gt_flow = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_flow.ndim == 3:
gt_flow = gt_flow[:, :, :2]
gt_flow[gt_flow == np.inf] = BF_DEFAULT # set to large number to be filtered out
gt_flow[np.isnan(gt_flow)] = BF_DEFAULT
gt_flow = gt_flow.astype(np.float32)
opt_list.append(gt_flow)
results["gt_" + self.key + "_list"] = opt_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"key='{self.key}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
return repr_str
@PIPELINES.register_module()
class LoadOcclusionAnnotations(object):
"""
255 for occ
"""
def __init__(
self,
file_client_args=dict(backend="disk"),
imdecode_backend="cv2",
key="flow_occ",
inverse=False
):
self.file_client_args = file_client_args.copy()
self.file_client = None
self.imdecode_backend = imdecode_backend
self.key = key
self.inverse = inverse
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], results["ann_info"][self.key]
)
else:
filename = results["ann_info"][self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_occ = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_occ = read_numpy_tartanair_uint8(filename)
else:
img_bytes = self.file_client.get(filename)
gt_occ = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_occ.ndim == 3:
gt_occ = gt_occ[:, :, -1]
if self.inverse: # make sure occ is True
gt_occ = 255 - gt_occ
results["gt_" + self.key] = gt_occ
results["seg_fields"].append("gt_" + self.key)
# Add information about the frames in the clip if present
if "img_info" in results and "mf" in results["img_info"]:
imginfo_list = results["img_info"]["mf"]
occ_list = []
for curr_imginfo in imginfo_list:
curr_anninfo = curr_imginfo["ann"]
if results.get(self.key + "_prefix", None) is not None:
filename = osp.join(
results[self.key + "_prefix"], curr_anninfo[self.key]
)
else:
filename = curr_anninfo[self.key]
if self.imdecode_backend == "pfm":
assert osp.splitext(filename)[1] == ".pfm", "Only support .pfm format"
gt_occ = np.array(read_pfm(filename)[0])
elif self.imdecode_backend == "tartanair":
assert osp.splitext(filename)[1] == ".npy", "Only support .npy format"
gt_occ = read_numpy_tartanair_uint8(filename)
else:
img_bytes = self.file_client.get(filename)
gt_occ = (
mmcv.imfrombytes(
img_bytes, flag="unchanged", backend=self.imdecode_backend
).squeeze().astype(np.float32)
)
if gt_occ.ndim == 3:
gt_occ = gt_occ[:, :, -1]
if self.inverse: # make sure occ is True
gt_occ = 255 - gt_occ
occ_list.append(gt_occ)
results["gt_" + self.key + "_list"] = occ_list
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f"key='{self.key}',"
repr_str += f"imdecode_backend='{self.imdecode_backend}',"
return repr_str
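# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): a typical
# mmseg-style training pipeline chaining the transforms registered above.
# Backends and keys are hypothetical and depend on the dataset.
# ---------------------------------------------------------------------------
# train_pipeline = [
#     dict(type="LoadImagesFromFile"),                                  # adds "img" / "img_list"
#     dict(type="LoadRImagesFromFile"),                                 # adds "r_img" / "r_img_list"
#     dict(type="LoadDispAnnotations", imdecode_backend="pfm"),         # adds "gt_disp"
#     dict(type="LoadOpticalFlowAnnotations", imdecode_backend="pfm"),  # adds "gt_flow"
#     dict(type="LoadOcclusionAnnotations", key="flow_occ"),            # adds "gt_flow_occ"
# ]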
| CODD-main | datasets/loading_stereo.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from mmseg.datasets import DATASETS
from .scene_flow import SceneFlowMultiFrameDataset
@DATASETS.register_module()
class Kitti2015MultiFrameDataset(SceneFlowMultiFrameDataset):
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".png",
flow_suffix=".png",
disp2_suffix=".png",
prefix_pattern=r"_\d+.png",
**kwargs,
)
@DATASETS.register_module()
class KittiDepthMultiFrameDataset(SceneFlowMultiFrameDataset):
def __init__(self, **kwargs):
super(SceneFlowMultiFrameDataset, self).__init__(
img_suffix=".png",
r_img_suffix=".png",
disp_suffix=".png",
flow_suffix=".png",
disp2_suffix=".png",
prefix_pattern=r"\d+.png",
**kwargs,
)
| CODD-main | datasets/kitti_depth.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import numpy as np
from mmcv.parallel import DataContainer as DC
from mmseg.datasets import PIPELINES
from mmseg.datasets.pipelines import to_tensor
@PIPELINES.register_module(force=True)
class DefaultFormatBundle(object):
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img"
and "gt_semantic_seg". These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with
default bundle.
"""
for key in results.get("img_fields", []):
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results[key] = DC(to_tensor(img), stack=True)
if "gt_semantic_seg" in results:
# convert to long
results["gt_semantic_seg"] = DC(
to_tensor(
results["gt_semantic_seg"][None, ...].astype(np.int64)
),
stack=True,
)
if "gt_disp" in results:
results["gt_disp"] = DC(
to_tensor(results["gt_disp"][None, ...]), stack=True
)
if "gt_flow" in results:
gt_flow = np.ascontiguousarray(results["gt_flow"].transpose(2, 0, 1))
results["gt_flow"] = DC(to_tensor(gt_flow), stack=True)
if "gt_sparse_disp" in results:
results["gt_sparse_disp"] = DC(
to_tensor(results["gt_sparse_disp"][None, ...]), stack=True
)
return results
def __repr__(self):
return self.__class__.__name__
@PIPELINES.register_module(force=True)
class DefaultFormatBundleList(object):
"""Default formatting bundle with multiple frames.
It simplifies the pipeline of formatting common fields, including "img"
and "gt_semantic_seg". These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def _get_stacked_tensor(self, img_list):
tensor_list = []
for img in img_list:
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
tensor_list.append(to_tensor(img))
return DC(torch.stack(tensor_list), stack=True)
def check_img(self, results, key, fail=False):
baseImage = results[key]
otherImage = results[key + "_list"][0]
        if fail and not np.array_equal(baseImage, otherImage):
            assert False, f"{key} does not match {key}_list[0]"
def __call__(self, results):
"""Call function to transform and format common fields in results.
Args:
results (dict): Result dict contains the data to convert.
Returns:
dict: The result dict contains the data that is formatted with
default bundle.
"""
self.check_img(results, "img")
self.check_img(results, "r_img")
if results.get("gt_disp", None) is not None:
self.check_img(results, "gt_disp", fail=True)
if results.get("gt_flow", None) is not None:
self.check_img(results, "gt_flow", fail=True)
if results.get("gt_disp_change", None) is not None:
self.check_img(results, "gt_disp_change", fail=True)
if results.get("gt_flow_occ", None) is not None:
self.check_img(results, "gt_flow_occ", fail=True)
if results.get("gt_disp2", None) is not None:
self.check_img(results, "gt_disp2", fail=True)
if results.get("gt_disp_occ", None) is not None:
self.check_img(results, "gt_disp_occ", fail=True)
for key in results.get("img_fields", []):
results[key] = self._get_stacked_tensor(results[key + "_list"])
del results[key + "_list"]
if "gt_semantic_seg_list" in results:
# convert to long
seg_list = results['gt_semantic_seg_list']
tensor_list = []
for seg in seg_list:
tensor_list.append(
to_tensor(seg[None, ...].astype(np.int64))
)
results['gt_semantic_seg'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_semantic_seg_list']
if "gt_disp_list" in results:
disp_list = results['gt_disp_list']
tensor_list = []
for disp in disp_list:
tensor_list.append(
to_tensor(disp[None, ...])
)
results['gt_disp'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp_list']
if "gt_flow_list" in results:
opt_list = results['gt_flow_list']
tensor_list = []
for opt in opt_list:
opt = np.ascontiguousarray(opt.transpose(2, 0, 1))
tensor_list.append(to_tensor(opt))
results['gt_flow'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_flow_list']
if "gt_disp_change_list" in results:
disp_change_list = results['gt_disp_change_list']
tensor_list = []
for disp in disp_change_list:
tensor_list.append(
to_tensor(disp[None, ...])
)
results['gt_disp_change'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp_change_list']
if "gt_disp2_list" in results:
disp_change_list = results['gt_disp2_list']
tensor_list = []
for disp in disp_change_list:
tensor_list.append(
to_tensor(disp[None, ...])
)
results['gt_disp2'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp2_list']
if "gt_flow_occ" in results:
flow_occ_list = results['gt_flow_occ_list']
tensor_list = []
for flow_occ in flow_occ_list:
tensor_list.append(
to_tensor(flow_occ[None, ...])
)
results['gt_flow_occ'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_flow_occ_list']
if "gt_disp_occ" in results:
disp_occ_list = results['gt_disp_occ_list']
tensor_list = []
for disp_occ in disp_occ_list:
tensor_list.append(
to_tensor(disp_occ[None, ...])
)
results['gt_disp_occ'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_disp_occ_list']
if "gt_sparse_disp_list" in results:
sp_disp_list = results['gt_sparse_disp_list']
tensor_list = []
for sparse_disp in sp_disp_list:
tensor_list.append(
to_tensor(sparse_disp[None, ...])
)
results['gt_sparse_disp'] = DC(torch.stack(tensor_list), stack=True)
del results['gt_sparse_disp_list']
return results
def __repr__(self):
return self.__class__.__name__
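# [Editor's illustrative sketch, not part of the original file.] What
# DefaultFormatBundleList._get_stacked_tensor does with the per-frame lists:
# every HxWxC frame is transposed to CxHxW and the T frames are stacked into a
# TxCxHxW tensor wrapped in a DataContainer. Shapes below are hypothetical.
def _example_stack_frames():
    frames = [np.zeros((4, 6, 3), dtype=np.float32) for _ in range(2)]
    bundled = DefaultFormatBundleList()._get_stacked_tensor(frames)
    # bundled.data.shape == torch.Size([2, 3, 4, 6])
    return bundled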
| CODD-main | datasets/formating.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
from .warp import flow_warp
BF_DEFAULT = 1050 * 0.2 # baseline * focal length
__imagenet_stats = {'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]}
def compute_valid_mask(gt_disp, meta, gt_semantic_seg=None, gt_flow_prev=None, gt_disp_change=None):
"""compute valid pixels based on either disparity, segmentation, flow or disp change (< 210 px)
at minimum, disparity should be provided
Args:
gt_disp (Tensor): NxHxW
meta (List): dataset meta information
gt_semantic_seg ([type], optional): NxHxW. Defaults to None.
gt_flow_prev ([type], optional): Nx2xHxW. Defaults to None.
gt_disp_change ([type], optional): NxHxW. Defaults to None.
Returns:
Tensor: True for valid
"""
mask = (gt_disp > meta["disp_range"][0]) & (gt_disp < meta["disp_range"][1])
if gt_semantic_seg is not None:
mask &= gt_semantic_seg > 0
if gt_flow_prev is not None:
mag = torch.sum(gt_flow_prev ** 2, dim=1, keepdim=True).sqrt()
mask &= mag < BF_DEFAULT
if gt_disp_change is not None:
mask &= gt_disp_change.abs() < BF_DEFAULT
mask.detach_()
return mask
def compute_gt_disp_change(gt_flow_occ_prev, gt_disp_prev, gt_disp_curr, gt_flow):
"""derive disparity change from data
Args:
gt_flow_occ_prev (Tensor): Nx1xHxW
gt_disp_prev (Tensor): Nx1xHxW
gt_disp_curr (Tensor): Nx1xHxW
gt_flow (Tensor): Nx2xHxW
Returns:
Tensor: disparity change, Nx1xHxW
"""
gt_disp_curr_warp, valid = flow_warp(
gt_disp_curr, gt_flow, padding_mode="zeros", mode="nearest"
)
gt_disp_change = gt_disp_curr_warp - gt_disp_prev
gt_disp_change[~valid] = BF_DEFAULT
gt_disp_change[gt_flow_occ_prev] = BF_DEFAULT # True for occluded
return gt_disp_change, gt_disp_curr_warp
def collect_metric(state):
"""store results
Args:
state (dict): states storing information
Returns:
Tensor: aggregated results
"""
metric_list = dict()
for k, v in state.items():
if "meter" in k:
metric_list[k.replace('_meter', '')] = torch.tensor([v.avg])
if "all" in k:
metric_list[k.replace('_all', '')] = torch.tensor([v])
return metric_list
def reset_meter(state):
"""reset results in states when new sequence starts
Args:
state (dict)): states storing information
"""
for k, v in state.items():
if "meter" in k:
v.reset()
if "all" in k:
state[k] = 0.0
def collect_gt(kwargs):
"""get ground truth data from kwargs"""
gt_disp = kwargs.get("gt_disp", None)
if gt_disp is not None:
gt_disp_list = torch.unbind(gt_disp[0], dim=1)
else:
gt_disp_list = None
gt_flow = kwargs.get("gt_flow", None)
if gt_flow is not None:
gt_flow_list = torch.unbind(gt_flow[0], dim=1)
else:
gt_flow_list = None
gt_disp_change = kwargs.get("gt_disp_change", None)
if gt_disp_change is not None:
gt_disp_change_list = torch.unbind(gt_disp_change[0], dim=1)
else:
gt_disp_change_list = None
gt_flow_occ = kwargs.get("gt_flow_occ", None)
if gt_flow_occ is not None:
gt_flow_occ_list = torch.unbind(gt_flow_occ[0], dim=1)
else:
gt_flow_occ_list = None
gt_disp2 = kwargs.get("gt_disp2", None)
if gt_disp2 is not None:
gt_disp2_list = torch.unbind(gt_disp2[0], dim=1)
else:
gt_disp2_list = None
gt_disp_occ = kwargs.get("gt_disp_occ", None)
if gt_disp_occ is not None:
gt_disp_occ_list = torch.unbind(gt_disp_occ[0], dim=1)
else:
gt_disp_occ_list = None
return (
gt_disp_list,
gt_flow_list,
gt_disp_change_list,
gt_flow_occ_list,
gt_disp2_list,
gt_disp_occ_list,
)
def denormalize(inp):
output = inp * torch.tensor(__imagenet_stats['std'], device=inp.device)
output = output + torch.tensor(__imagenet_stats['mean'], device=inp.device)
output = output * 255
output = output.byte()
return output
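# [Editor's illustrative sketch, not part of the original file.] Minimal usage
# of compute_valid_mask; the disparity values and disp_range are hypothetical.
def _example_valid_mask():
    gt_disp = torch.tensor([[[0.0, 12.0, 500.0]]])   # N x H x W
    meta = {"disp_range": (1.0, 192.0)}
    mask = compute_valid_mask(gt_disp, meta)
    # mask == tensor([[[False, True, False]]])
    return mask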
| CODD-main | utils/misc.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .running_stats import *
from .metric import *
from .misc import *
from .warp import *
| CODD-main | utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import csv
import re
import numpy as np
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name=' ', fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class RunningStats(object):
"""Computes running mean and standard deviation
Adapted from https://gist.github.com/wassname/a9502f562d4d3e73729dc5b184db2501
Usage:
rs = RunningStats()
for i in range(10):
rs += np.random.randn()
print(rs)
print(rs.mean, rs.std)
"""
def __init__(self, n=0.0, m=None, s=None):
self.n = n
self.m = m
self.s = s
def clear(self):
self.n = 0.0
def push(self, x, per_dim=True):
x = np.array(x).copy().astype('float32')
# process input
if per_dim:
self.update_params(x)
else:
for el in x.flatten():
self.update_params(el)
def update_params(self, x):
self.n += 1
if self.n == 1:
self.m = x
self.s = 0.0
else:
prev_m = self.m.copy()
self.m += (x - self.m) / self.n
self.s += (x - prev_m) * (x - self.m)
def __add__(self, other):
if isinstance(other, RunningStats):
sum_ns = self.n + other.n
prod_ns = self.n * other.n
delta2 = (other.m - self.m) ** 2.0
return RunningStats(
sum_ns,
(self.m * self.n + other.m * other.n) / sum_ns,
self.s + other.s + delta2 * prod_ns / sum_ns,
)
else:
self.push(other)
return self
@property
def mean(self):
return self.m if self.n else 0.0
def variance(self):
return self.s / (self.n) if self.n else 0.0
@property
def std(self):
return np.sqrt(self.variance())
def __repr__(self):
return (
'<RunningMean(mean={: 2.4f}, std={: 2.4f}, n={: 2f}, m={: 2.4f}, s={: 2.4f})>'.format(
self.mean, self.std, self.n, self.m, self.s
)
)
def __str__(self):
return 'mean={: 2.4f}, std={: 2.4f}'.format(self.mean, self.std)
class RunningStatsWithBuffer(RunningStats):
def __init__(self, path=None, row_id_map=None, data=None, header=None, n=0.0, m=None, s=None):
super(RunningStatsWithBuffer, self).__init__(n, m, s)
self.path = path
if data is None:
self.data = []
else:
assert isinstance(data, list) and any(isinstance(i, list) for i in data)
self.data = data
if row_id_map is None:
self.row_id_map = {}
else:
assert isinstance(row_id_map, dict)
self.row_id_map = row_id_map
if header is None:
self.header = None
else:
assert isinstance(header, list)
self.header = header
def push(self, id, value, per_dim=True):
if id in self.row_id_map:
return
self.row_id_map[id] = len(self.data)
self.data.append(value if isinstance(value, list) else [value])
super(RunningStatsWithBuffer, self).push(value)
def __add__(self, other):
if isinstance(other, RunningStats):
for k, v in other.row_id_map.items():
if k in self.row_id_map:
continue
self.row_id_map[k] = len(self.data)
self.data.append(other.data[v])
data_array = np.array(self.data).copy().astype('float32')
return RunningStatsWithBuffer(
self.path,
self.row_id_map,
self.data,
self.header,
len(self.data),
np.nanmean(data_array, 0),
np.nanvar(data_array, 0),
)
else:
self.push(*other)
return self
def dump(self):
def natural_sort(l):
def convert(text):
return int(text) if text.isdigit() else text.lower()
return sorted(l, key=lambda key: [convert(c) for c in re.split('([0-9]+)', key[0])])
table = [self.header]
table.extend([[k] + self.data[v] for k, v in self.row_id_map.items()])
table[1:] = natural_sort(table[1:])
with open(self.path, 'w') as f:
writer = csv.writer(f)
writer.writerows(table)
@property
def mean(self):
data_array = np.array(self.data).copy().astype('float32')
return np.nanmean(data_array, 0)
def variance(self):
data_array = np.array(self.data).copy().astype('float32')
return np.nanvar(data_array, 0)
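# [Editor's illustrative sketch, not part of the original file.] RunningStats
# objects can also be merged with "+", combining the counts, means and
# variances of two disjoint streams; the values below are hypothetical.
def _example_merge_running_stats():
    a, b = RunningStats(), RunningStats()
    for x in (1.0, 2.0, 3.0):
        a += x
    for x in (4.0, 5.0):
        b += x
    merged = a + b
    # merged.n == 5.0 and merged.mean == 3.0
    return merged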
| CODD-main | utils/running_stats.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import numpy as np
import torch
EPSILON = 1e-8
def epe_metric(d_est, d_gt, mask, use_np=False):
d_est, d_gt = d_est[mask], d_gt[mask]
if use_np:
epe = np.mean(np.abs(d_est - d_gt))
else:
epe = torch.mean(torch.abs(d_est - d_gt))
return epe
def t_epe_metric(d_est_t0, d_gt_t0, d_est_t1, d_gt_t1, mask_t0, mask_t1, use_np=False):
d_est = d_est_t0 - d_est_t1
d_gt = d_gt_t0 - d_gt_t1
# sanity_mask = (d_est_t0 > 0.0) & (d_est_t1 > 0.0) # disparity must be larger than 0
if use_np:
mask = np.logical_and(mask_t0, mask_t1)
# mask = np.logical_and(mask, sanity_mask)
mask = mask.astype(bool)
abs_err = np.abs(d_est - d_gt)[mask]
relative_err = abs_err / (np.abs(d_gt[mask]) + 1e-3)
else:
mask = torch.logical_and(mask_t0, mask_t1)
# mask = torch.logical_and(mask, sanity_mask)
mask = mask.bool()
abs_err = torch.abs(d_est - d_gt)[mask]
relative_err = abs_err / (torch.abs(d_gt[mask]) + 1e-3)
return abs_err, relative_err
def thres_metric(d_est, d_gt, mask, thres, use_np=False):
assert isinstance(thres, (int, float))
d_est, d_gt = d_est[mask], d_gt[mask]
if use_np:
e = np.abs(d_gt - d_est)
else:
e = torch.abs(d_gt - d_est)
err_mask = e > thres
if use_np:
mean = np.mean(err_mask.astype("float"))
else:
mean = torch.mean(err_mask.float())
return mean
def depth2normal(depth):
zy, zx = np.gradient(depth)
# or use Sobel to get a joint Gaussian smoothing and differentation to reduce noise
# zx = cv2.Sobel(d_im, cv2.CV_64F, 1, 0, ksize=5)
# zy = cv2.Sobel(d_im, cv2.CV_64F, 0, 1, ksize=5)
normal = np.dstack((-zx, -zy, np.ones_like(depth)))
n = np.linalg.norm(normal, axis=2)
normal[:, :, 0] /= n
normal[:, :, 1] /= n
normal[:, :, 2] /= n
# offset and rescale values to be in [0, 1]
normal += 1
normal /= 2
return normal
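# [Editor's illustrative sketch, not part of the original file.] Minimal usage
# of the disparity metrics above; the tensors are hypothetical.
def _example_disp_metrics():
    d_gt = torch.tensor([1.0, 4.0, 10.0])
    d_est = torch.tensor([1.5, 4.0, 2.0])
    mask = d_gt > 0
    epe = epe_metric(d_est, d_gt, mask)          # mean abs error = (0.5 + 0 + 8) / 3
    bad3 = thres_metric(d_est, d_gt, mask, 3.0)  # fraction of pixels with error > 3 px = 1/3
    return epe, bad3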
| CODD-main | utils/metric.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import re
from argparse import ArgumentParser
import numpy as np
from natsort import natsorted
def write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split):
fname = os.path.join(args.output_path, args.dataset + '_' + split + '.txt')
with open(fname, 'w') as f:
for idx in range(len(left_image)):
line = ' '.join([left_image[idx], right_image[idx], disparity[idx]])
if flow is not None:
line += ' ' + flow[idx]
else:
line += ' None'
if disp_change is not None:
line += ' ' + disp_change[idx]
else:
line += ' None'
if flow_occ is not None:
line += ' ' + flow_occ[idx]
else:
line += ' None'
if disp_frame2_in_frame1 is not None:
line += ' ' + disp_frame2_in_frame1[idx]
else:
line += ' None'
if disp_occ is not None:
line += ' ' + disp_occ[idx]
else:
line += ' None'
f.write(line + '\n')
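# ---------------------------------------------------------------------------
# Editor's illustrative note (not part of the original script): write_to_file
# above emits one space-separated line per frame,
#   left_image right_image disparity flow disp_change flow_occ disp2 disp_occ
# with the literal token "None" standing in for any annotation the dataset
# does not provide. The resulting txt files are the split files consumed by
# SceneFlowMultiFrameDataset.load_annotations.
# ---------------------------------------------------------------------------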
def split_sceneflow(args, split):
# left images
left_image = []
if split == 'train' or split == 'val':
train_path = os.path.join(args.data_root, 'TRAIN')
else:
train_path = os.path.join(args.data_root, 'TEST')
# find all images
for root, dirs, files in os.walk(train_path):
if len(files) > 0 and 'left' in root:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
num_imgs = int(len(left_image) * (1 - args.val_ratio))
if split == 'train':
left_image = left_image[:num_imgs]
elif split == 'val':
left_image = left_image[num_imgs:]
left_image = natsorted(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('left', 'right'))
# disparity
disparity = []
for li in left_image:
disparity.append(li.replace('.png', '.pfm'))
# optical flow
flow = []
for li in left_image:
fname = li.replace('/left/', '/into_future/left/')
        idx = re.search(r'\d+.png', li).group()
post = '_L.pfm'
pre = 'OpticalFlowIntoFuture_'
opt_idx = pre + idx.replace('.png', '') + post
flow.append(fname.replace(idx, opt_idx))
# disparity change
disp_change = []
for li in left_image:
fname = li.replace('/left/', '/into_future/left/')
disp_change.append(fname.replace('.png', '.pfm'))
# flow_occ
flow_occ = None
# disp_frame2_in_frame1
disp_frame2_in_frame1 = None
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split)
def split_kitti_depth(args, split):
val_split = ['2011_10_03/2011_10_03_drive_0042_sync/'] # 1 scene
test_split = ['2011_09_26/2011_09_26_drive_0002_sync', '2011_09_26/2011_09_26_drive_0005_sync/',
'2011_09_26/2011_09_26_drive_0013_sync/', '2011_09_26/2011_09_26_drive_0020_sync/',
'2011_09_26/2011_09_26_drive_0023_sync/', '2011_09_26/2011_09_26_drive_0036_sync/',
'2011_09_26/2011_09_26_drive_0079_sync/', '2011_09_26/2011_09_26_drive_0095_sync/',
'2011_09_26/2011_09_26_drive_0113_sync/', '2011_09_28/2011_09_28_drive_0037_sync/',
'2011_09_29/2011_09_29_drive_0026_sync/', '2011_09_30/2011_09_30_drive_0016_sync/',
'2011_10_03/2011_10_03_drive_0047_sync/'] # 13 scenes
# left images
left_image = []
# find all images
for root, dirs, files in os.walk(args.data_root):
if len(files) > 0 and 'image_02' in root:
if split == 'val':
for val_scene in val_split:
if val_scene not in root:
continue
else:
print(val_scene, root)
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
elif split == 'test':
for test_scene in test_split:
if test_scene not in root:
continue
else:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
else: # the rest are training splits
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
left_image = natsorted(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('image_02', 'image_03'))
# disparity
disparity = []
for li in left_image:
disparity.append(li.replace('image_02', 'disp'))
# optical flow
flow = []
for li in left_image:
flow.append(li.replace('image_02', 'flow'))
# disparity change
disp_change = None
# flow_occ
flow_occ = None
# disp_frame2_in_frame1
disp_frame2_in_frame1 = []
for li in left_image:
disp_frame2_in_frame1.append(li.replace('image_02', 'disp2'))
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split)
def split_kitti_2015(args, split):
# left images
left_image = []
# find all images
for root, dirs, files in os.walk(args.data_root):
if len(files) > 0 and 'training/image_2' in root:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
left_image = natsorted(left_image)
folds = np.array_split(np.stack(left_image), 5) # 5-fold cross validation
for fold in range(5):
if split == 'train':
left_image = [x for ii, x in enumerate(folds) if ii != fold]
left_image = np.concatenate(left_image)
elif split == 'val':
left_image = folds[fold]
num_images = len(left_image)
left_image = left_image[:int(num_images * 0.5)]
elif split == 'test':
left_image = folds[fold]
num_images = len(left_image)
left_image = folds[fold][int(num_images * 0.5):]
left_image = list(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('image_2', 'image_3'))
# disparity
disparity = []
for li in left_image:
if '_10' in li: # only disparity of first frame is provided
disparity.append(li.replace('image_2', 'disp_occ_0'))
else:
disparity.append('None')
# optical flow
flow = []
for li in left_image:
if '_10' in li: # only flow of first frame is provided
flow.append(li.replace('image_2', 'flow_occ'))
else:
flow.append('None')
# disparity change
disp_change = None
# flow_occ
flow_occ = None
# disp_frame2_in_frame1
disp_frame2_in_frame1 = []
for li in left_image:
if '_10' in li: # only disp2 of first frame is provided
disp_frame2_in_frame1.append(li.replace('image_2', 'disp_occ_1'))
else:
disp_frame2_in_frame1.append('None')
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split + str(fold))
def split_tartanair(args, split):
train_split = ['abandonedfactory', 'abandonedfactory_night', 'amusement', 'endofworld', 'gascola', 'hospital',
'japanesealley', 'neighborhood', 'ocean', 'office', 'office2', 'oldtown', 'seasidetown',
'seasonsforest_winter', 'soulcity', 'westerndesert']
test_split = ['carwelding']
val_split = ['seasonsforest']
# left images
left_image = []
# find all images
for root, dirs, files in os.walk(args.data_root):
if len(files) > 0 and 'image_left' in root:
if split == 'val':
for val_scene in val_split:
if val_scene not in root:
continue
else:
print(val_scene, root)
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
elif split == 'test':
for test_scene in test_split:
if test_scene not in root:
continue
else:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
else: # the rest are training splits
for train_scene in train_split:
if train_scene not in root:
continue
else:
for fname in files:
if '.png' in fname:
fname = os.path.join(root, fname).replace(args.data_root, '')
left_image.append(fname[1:]) # remove leading /
left_image = natsorted(left_image)
# right images
right_image = []
for li in left_image:
right_image.append(li.replace('image_left', 'image_right').replace('_left.png', '_right.png'))
# disparity
disparity = []
for li in left_image:
disparity.append(li.replace('image_left', 'depth_left').replace('_left.png', '_left_depth.npy'))
# optical flow
flow = []
for li in left_image:
flow.append(li.replace('image_left', 'flow').replace('_left.png', '_flow.npy'))
# disparity change
disp_change = None
# flow_occ
flow_occ = []
for li in left_image:
        flow_occ.append(li.replace('image_left', 'flow').replace('_left.png', '_mask.npy'))
# disp_frame2_in_frame1
disp_frame2_in_frame1 = None
# disp_occ
disp_occ = None
write_to_file(args, left_image, right_image, disparity, flow, disp_change, flow_occ, disp_frame2_in_frame1,
disp_occ, split)
def main():
parser = ArgumentParser('split generation')
parser.add_argument('--dataset', type=str,
choices=['SceneFlow', 'KITTI_Depth', 'KITTI_2015', 'TartanAir', 'Sintel'])
parser.add_argument('--output_path', type=str, help='path to write the split files')
parser.add_argument('--val_ratio', type=float, default=0.1)
parser.add_argument('--data_root', type=str, help="Path to data (left and right images)")
args = parser.parse_args()
splits = ['train', 'val', 'test']
if args.dataset == 'SceneFlow':
for split in splits:
split_sceneflow(args, split)
elif args.dataset == 'KITTI_Depth':
for split in splits:
split_kitti_depth(args, split)
elif args.dataset == 'KITTI_2015':
for split in splits:
split_kitti_2015(args, split)
elif args.dataset == 'TartanAir':
for split in splits:
split_tartanair(args, split)
if __name__ == "__main__":
main()
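
# Example invocation (a sketch; the paths are placeholders, not shipped with the repo):
#
#   python utils/generate_split_files.py --dataset SceneFlow \
#       --data_root /path/to/SceneFlowData --output_path ./splits --val_ratio 0.1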
| CODD-main | utils/generate_split_files.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import torch
import torch.nn.functional as F
def normalize_coords(grid):
"""Normalize coordinates of image scale to [-1, 1]
Args:
grid: [B, 2, H, W]
"""
assert grid.size(1) == 2
h, w = grid.size()[2:]
grid[:, 0, :, :] = 2 * (grid[:, 0, :, :].clone() / (w - 1)) - 1 # x: [-1, 1]
grid[:, 1, :, :] = 2 * (grid[:, 1, :, :].clone() / (h - 1)) - 1 # y: [-1, 1]
grid = grid.permute((0, 2, 3, 1)) # [B, H, W, 2]
return grid
def meshgrid(img, homogeneous=False):
"""Generate meshgrid in image scale
Args:
img: [B, _, H, W]
homogeneous: whether to return homogeneous coordinates
Return:
grid: [B, 2, H, W]
"""
b, _, h, w = img.size()
x_range = torch.arange(0, w).view(1, 1, w).expand(1, h, w).type_as(img) # [1, H, W]
y_range = torch.arange(0, h).view(1, h, 1).expand(1, h, w).type_as(img)
grid = torch.cat((x_range, y_range), dim=0) # [2, H, W], grid[:, i, j] = [j, i]
grid = grid.unsqueeze(0).expand(b, 2, h, w) # [B, 2, H, W]
if homogeneous:
ones = torch.ones_like(x_range).unsqueeze(0).expand(b, 1, h, w) # [B, 1, H, W]
grid = torch.cat((grid, ones), dim=1) # [B, 3, H, W]
assert grid.size(1) == 3
return grid
def disp_warp(img, disp, padding_mode="border"):
"""Warping by disparity
Args:
img: [B, 3, H, W]
disp: [B, 1, H, W], positive
padding_mode: 'zeros' or 'border'
Returns:
warped_img: [B, 3, H, W]
valid_mask: [B, 3, H, W]
"""
grid = meshgrid(img) # [B, 2, H, W] in image scale
# Note that -disp here
offset = torch.cat((-disp, torch.zeros_like(disp)), dim=1) # [B, 2, H, W]
sample_grid = grid + offset
sample_grid = normalize_coords(sample_grid) # [B, H, W, 2] in [-1, 1]
warped_img = F.grid_sample(
img, sample_grid, mode="bilinear", padding_mode=padding_mode, align_corners=True
)
mask = torch.ones_like(img)
valid_mask = F.grid_sample(mask, sample_grid, mode="bilinear", padding_mode="zeros", align_corners=True)
valid_mask[valid_mask < 0.9999] = 0
valid_mask[valid_mask > 0] = 1
return warped_img, valid_mask.bool()
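
# Usage sketch (illustrative; the tensors below are assumptions, not part of this
# module). Warping the right image with the left-view disparity reconstructs the
# left view, which is the typical building block of a photometric check:
#
#   import torch
#   left = torch.rand(1, 3, 256, 512)              # left image, NCHW
#   right = torch.rand(1, 3, 256, 512)             # right image, NCHW
#   disp = torch.rand(1, 1, 256, 512) * 64         # positive disparity in the left view
#   right_warped, valid = disp_warp(right, disp)   # right image resampled into the left view
#   photo_err = (right_warped - left).abs()[valid].mean()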
def flow_warp(img, flow, padding_mode="border", mode="bilinear"):
"""Warping by flow
Args:
img: [B, _, H, W]
flow: [B, 2, H, W]
padding_mode: 'zeros' or 'border'
Returns:
warped_img: [B, _, H, W]
valid_mask: [B, _, H, W]
"""
assert len(img.shape) == 4 and len(flow.shape) == 4, "Input must have 4 dimension"
assert flow.shape[1] == 2, "Flow must be channel=2"
grid = meshgrid(img) # [B, 2, H, W] in image scale
    # Add the flow offsets to the base pixel grid
sample_grid = grid + flow
sample_grid = normalize_coords(sample_grid) # [B, H, W, 2] in [-1, 1]
warped_img = F.grid_sample(img, sample_grid, mode=mode, padding_mode=padding_mode, align_corners=True)
mask = torch.ones_like(img)
valid_mask = F.grid_sample(mask, sample_grid, mode=mode, padding_mode="zeros", align_corners=True)
valid_mask[valid_mask < 0.9999] = 0
valid_mask[valid_mask > 0] = 1
return warped_img, valid_mask.bool()
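
# Usage sketch (illustrative; tensor names are assumptions). Given the forward
# optical flow defined at frame t, flow_warp backward-warps frame t+1 into the
# frame-t view, e.g. to compare consecutive predictions as done in the evaluation
# code of this repo:
#
#   warped_next, valid = flow_warp(img_next, flow_t)   # img_next: [B,C,H,W], flow_t: [B,2,H,W]
#   temporal_diff = (warped_next - img_curr).abs() * valid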
def interpolate_value_disp(x, indices, maxdisp):
"""
bilinear interpolate tensor x at sampled indices
x: [B, D, H, W] (features)
indices: [B, H, W] sampled indices (0-indexed)
"""
# B,D,H,W to B,H,W,D
x = x.permute(0, 2, 3, 1)
indices = torch.unsqueeze(indices, -1)
indices = torch.clamp(indices, 0, maxdisp - 1)
idx0 = torch.floor(indices).long()
idx1 = torch.min(idx0 + 1, (maxdisp - 1) * torch.ones_like(idx0))
idx0 = torch.max(idx1 - 1, torch.zeros_like(idx0))
y0 = torch.gather(x, -1, idx0)
y1 = torch.gather(x, -1, idx1)
lmbda = indices - idx0.float()
output = (1 - lmbda) * y0 + (lmbda) * y1
output = torch.squeeze(output, -1)
return output
def get_disp_from_offset(pred, off, maxdisp, down):
_, pred = torch.max(pred, 1)
off = interpolate_value_disp(off, pred.float(), maxdisp // down)
pred = (pred + off) * down
return pred
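
# Usage sketch (illustrative shapes, not taken from this repo). `pred` is a
# coarse disparity probability volume sampled at 1/`down` of the disparity range
# and `off` holds per-hypothesis sub-pixel residuals; the helper takes the argmax
# hypothesis, interpolates its offset and rescales to the full disparity range:
#
#   # pred, off: [B, D, H, W] with D = maxdisp // down
#   disp = get_disp_from_offset(pred, off, maxdisp=320, down=4)   # [B, H, W]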
def interpolate_value(x, indices, maxdepth):
"""
bilinear interpolate tensor x at sampled indices
x: [B, D, H, W] (features)
    indices: [B, H, W] sampled indices (1-indexed)
"""
# B,D,H,W to B,H,W,D
x = x.permute(0, 2, 3, 1)
indices = torch.unsqueeze(indices - 1, -1)
indices = torch.clamp(indices, 0, maxdepth - 1)
idx0 = torch.floor(indices).long()
idx1 = torch.min(idx0 + 1, (maxdepth - 1) * torch.ones_like(idx0))
idx0 = torch.max(idx1 - 1, torch.zeros_like(idx0))
y0 = torch.gather(x, -1, idx0)
y1 = torch.gather(x, -1, idx1)
lmbda = indices - idx0.float()
output = (1 - lmbda) * y0 + (lmbda) * y1
output = torch.squeeze(output, -1)
return output
def get_depth_from_offset(pred, off, mindepth=1, scale=1):
_, pred = torch.max(pred, 1, keepdim=True)
off = torch.gather(off, 1, pred)
pred = pred + mindepth # Make 1-indexed
pred = (pred + off) * scale
return torch.squeeze(pred, 1)
| CODD-main | utils/warp.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os
import re
import time
from argparse import ArgumentParser
import cv2
import numpy as np
import open3d as o3d
from natsort import natsorted
from tqdm import tqdm
class InteractivePCDVisualizer(object):
def __call__(self, pcd_list):
o3d.visualization.draw_geometries(pcd_list)
class VideoPCDVisualizer(object):
def __init__(self, save_path, frame_rate, size=(1600, 1600)):
self.vis = o3d.visualization.Visualizer()
self.frame_rate = float(frame_rate)
self.save_path = save_path
self.width, self.height = size
def __call__(self, frames_pcds):
"""
frames_pcds is a list of lists. The outer list holds the frame
pointclouds for the video. The inner list holds the pointclouds for each frame.
pointclouds must be o3d.geometry.PointCloud() objects
"""
self.vis.create_window(width=self.width, height=self.height)
rgb_list = []
for frame_index, frame_pcds in enumerate(frames_pcds):
ctr = self.vis.get_view_control()
for pcd in frame_pcds:
reset_bounding_box = False if frame_index > 0 else True
self.vis.add_geometry(pcd, reset_bounding_box=reset_bounding_box)
# if frame_index == 0:
# ctr.set_up(self.up)
# ctr.set_lookat(self.lookat)
# ctr.set_front(self.front)
# ctr.set_zoom(self.zoom)
opt = self.vis.get_render_option()
opt.point_size = point_size
opt.background_color = [0, 0, 0]
self.vis.poll_events()
self.vis.update_renderer()
for i, frame_pcd in enumerate(frame_pcds):
self.vis.remove_geometry(frame_pcd, reset_bounding_box=False)
rgb = self.vis.capture_screen_float_buffer()
rgb = np.array(rgb) * 255
rgb_list.append(rgb[:, :, ::-1].astype(np.uint8))
time.sleep(1.0 / self.frame_rate)
output_file = cv2.VideoWriter(
filename=self.save_path,
fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
fps=self.frame_rate,
frameSize=(rgb_list[0].shape[1], rgb_list[0].shape[0]),
isColor=True,
)
for rgb in rgb_list:
output_file.write(rgb)
output_file.release()
class PCDBuilder(object):
def __init__(self, fx, fy, cx, cy, baseline):
self.camera = o3d.camera.PinholeCameraIntrinsic()
self.camera.intrinsic_matrix = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
self.baseline = baseline
def pcd_from_rgbd(self, color, disp, disp_trunc, remove_flying):
disp[disp < disp_trunc[0]] = 0.0
disp[disp > disp_trunc[1]] = 0.0
color_raw = o3d.geometry.Image(cv2.cvtColor(color, cv2.COLOR_BGR2RGB))
depth_raw = self.camera.intrinsic_matrix[0, 0] / (disp + 1e-5) * self.baseline
depth_raw = o3d.geometry.Image(depth_raw)
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
color_raw, depth_raw, depth_trunc=3.0, convert_rgb_to_intensity=False
)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, self.camera)
if remove_flying:
pcd, _ = pcd.remove_statistical_outlier(10, 5)
# Flip it, otherwise the pointcloud will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
return pcd
def __call__(self, color, depth, depth_trunc, remove_flying):
frame_pcds = []
for idx, img in enumerate(tqdm(color, desc="Creating pcds")):
single_frame_pcds = [
self.pcd_from_rgbd(img, depth[idx], depth_trunc, remove_flying)]
frame_pcds.append(single_frame_pcds)
return frame_pcds
def load_depth_path(color_path, revise_keys=[('img_left', 'Depth'), ('RGB_0_Rectified', 'Depth_sf')]):
depth_path = color_path
for p, r in revise_keys:
depth_path = re.sub(p, r, depth_path)
return depth_path
def main(args):
if args.fy is None:
args.fy = args.fx
if args.cx is None:
args.cx = args.shape[0] / 2
if args.cy is None:
args.cy = args.shape[1] / 2
color_path = args.input
depth_path = args.depth
img_fname_list = natsorted(os.listdir(color_path))
depth_fname_list = natsorted(os.listdir(depth_path))
img_list = []
for idx, fname in enumerate(img_fname_list):
if os.path.splitext(fname)[-1] != '.png':
continue
if idx < args.start_frame:
continue
img = cv2.imread(os.path.join(color_path, fname), cv2.IMREAD_COLOR)
img_list.append(img)
if not args.video:
break
if 0 < args.num_frames <= len(img_list):
break
disp_list = []
for idx, fname in enumerate(depth_fname_list):
if os.path.splitext(fname)[-1] != '.npz':
continue
if idx * 50 < args.start_frame:
continue
disp = np.load(os.path.join(depth_path, fname))['disp']
disp_list.append(disp)
if not args.video:
break
if 0 < args.num_frames and args.num_frames / 50 <= len(disp_list):
break
disp_list = np.concatenate(disp_list, axis=0)
if len(img_list) < disp_list.shape[0]:
disp_list = disp_list[:len(img_list)]
elif len(img_list) > disp_list.shape[0]:
img_list = img_list[:disp_list.shape[0]]
# crop
h, w = img_list[0].shape[:2]
border_h, border_w = int(args.shrink[1] * h), int(args.shrink[0] * w)
pcd_builder = PCDBuilder(args.fx, args.fy, args.cx - border_w, args.cy - border_h, args.baseline)
d_list = []
for idx, color in enumerate(img_list):
border_h_b, border_w_r = int(args.shrink[-1] * h), int(args.shrink[-2] * w)
color = color[border_h:-border_h_b, border_w:-border_w_r]
img_list[idx] = color
disp = disp_list[idx, border_h:-border_h_b, border_w:-border_w_r]
d_list.append(disp)
frame_pcds = pcd_builder(img_list, d_list, args.disp_trunc, args.remove_flying)
if not args.video:
InteractivePCDVisualizer()(frame_pcds[0])
else:
VideoPCDVisualizer(args.output, args.frame_rate)(frame_pcds)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--video", action='store_true', help='Save visualization to video')
parser.add_argument("--frame-rate", default=30)
parser.add_argument("--input", help="Directory to input images")
parser.add_argument("--depth", help="Directory to depth images")
parser.add_argument("--output", help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.")
parser.add_argument("--fx", default=51.2 / 36 * 1024,
type=float, help="focal length along x-axis (longer side) in pixels")
parser.add_argument("--fy", default=None,
type=float, help="focal length along y-axis (shorter side) in pixels")
parser.add_argument("--cx", default=None, type=float, help="centre of image along x-axis")
parser.add_argument("--cy", default=None, type=float, help="centre of image along y-axis")
parser.add_argument("--baseline", default=1.0, type=float, help="baseline")
parser.add_argument("--shape", type=int, nargs="+", default=[1600, 1200], help="input image size [W, H]")
parser.add_argument("--disp_trunc", type=float, nargs='+', default=[1.0, 210.0])
parser.add_argument("--shrink", nargs='+', type=float, default=[0.1] * 4, help='left top right bottom')
parser.add_argument("--point_size", type=int, default=3)
parser.add_argument("--num_frames", default=-1, type=int)
parser.add_argument("--remove_flying", action='store_true')
parser.add_argument("--start_frame", type=int, default=0)
args = parser.parse_args()
point_size = args.point_size
main(args)
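
# Example invocations (sketches; the paths are placeholders):
#
#   # inspect a single frame interactively
#   python utils/vis_point_cloud.py --input ./frames --depth ./disp_npz --start_frame 0
#
#   # render a video of the reconstructed point clouds
#   python utils/vis_point_cloud.py --input ./frames --depth ./disp_npz \
#       --video --output ./pointcloud.mp4 --frame-rate 30 --remove_flying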
| CODD-main | utils/vis_point_cloud.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
_base_ = [
'models/consistent_online_depth_network.py', 'datasets/custom.py',
'default_runtime.py'
]
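
# The fragments listed in `_base_` are merged by mmcv's config system. A minimal
# sketch of how this file is typically loaded (standard mmcv API; printing the
# merged result is optional):
#
#   from mmcv import Config
#   cfg = Config.fromfile('configs/inference_config.py')
#   print(cfg.pretty_text)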
| CODD-main | configs/inference_config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
_base_ = [
'models/codd.py', 'datasets/scene_flow.py',
'default_runtime.py', 'schedules/schedule_stereo.py'
] | CODD-main | configs/training_config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
| CODD-main | configs/default_runtime.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# pseudo camera parameters that don't really matter for inference
intrinsics = [640, 360, 1050, 1050]
calib = 210
disp_range = (1, 210)
depth_range = (calib / 210.0, calib / 1.0)
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
test=dict(
type="CustomStereoMultiFrameDataset",
test_mode=True,
img_dir=None,
r_img_dir=None,
ann_dir=None,
disp_dir=None,
img_suffix=".png",
r_img_suffix=".png",
split=None,
pipeline=pipeline,
num_samples=-1,
calib=calib,
disp_range=disp_range,
depth_range=depth_range,
num_frames=-1,
prefix_pattern=r'\d+.+.png',
intrinsics=intrinsics
),
)
| CODD-main | configs/datasets/custom.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "TartanAirMultiFrameDataset"
data_root = "PATH_TO_DATA"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 320 * 0.25 # from https://github.com/castacks/tartanair_tools/blob/master/data_type.md
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [320, 320, 320, 240] # https://github.com/castacks/tartanair_tools/blob/master/data_type.md
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (448, 640)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="tartanair", key="disp", is_reciprocal=True, calib=calib),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="tartanair", key="flow"),
dict(type="LoadOcclusionAnnotations", imdecode_backend="tartanair", key="flow_occ"),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255, disp_pad_val=0),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="tartanair", key="disp", is_reciprocal=True, calib=calib),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="tartanair", key="flow"),
dict(type="LoadOcclusionAnnotations", imdecode_backend="tartanair", key="flow_occ"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
flow_occ_dir=data_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
flow_occ_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
flow_occ_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
| CODD-main | configs/datasets/tartanair.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "SceneFlowMultiFrameDataset"
data_root = "PATH_TO_STEREO_IMG"
disp_root = "PATH_TO_DISPARITY"
flow_root = "PATH_TO_FLOW"
disp_change_root = "PATH_TO_DISPARITY_CHANGE"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 1050
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [1050, 1050, 480, 270]
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (384, 768)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="pfm", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp_change"),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion", asym=True),
dict(type="Normalize", **img_norm_cfg),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp_change"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="pfm", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="pfm", key="disp_change"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp_change"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=disp_root,
flow_dir=flow_root,
disp_change_dir=disp_change_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=disp_root,
flow_dir=flow_root,
disp_change_dir=disp_change_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=disp_root,
flow_dir=flow_root,
disp_change_dir=disp_change_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
| CODD-main | configs/datasets/scene_flow.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "SintelMultiFrameDataset"
data_root = "PATH_TO_DATA"
flow_root = "PATH_TO_FLOW"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 688 * 0.01
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [688, 688, 512, 218]  # fx=fy=688, cx=512, cy=218 (from depth folder camera data); baseline=10cm (from stereo data README)
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (320, 1024)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="sintel", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="sintel", key="flow"),
dict(type="LoadOcclusionAnnotations", key="flow_occ"),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="StereoPhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="sintel", key="disp"),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="sintel", key="flow"),
dict(type="LoadOcclusionAnnotations", key="flow_occ"),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_flow_occ"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=flow_root,
flow_occ_dir=flow_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=flow_root,
flow_occ_dir=flow_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=flow_root,
flow_occ_dir=flow_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
| CODD-main | configs/datasets/sintel.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "KittiDepthMultiFrameDataset"
data_root = "PATH_TO_DATA"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 384.38 # from raw data calibration result
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [721.54, 721.54, 621, 187.5] # image resolution 1242, 375
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (320, 960)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255, disp_pad_val=0),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
| CODD-main | configs/datasets/kitti_depth.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# dataset settings
dataset_type = "Kitti2015MultiFrameDataset"
data_root = "PATH_TO_DATA"
train_split = "PATH_TO_SPLIT"
val_split = "PATH_TO_SPLIT"
test_split = "PATH_TO_SPLIT"
calib = 384.38 # from raw data calibration result
disp_range = (1.0, 210.0)
depth_range = (calib / disp_range[1], calib / disp_range[0])
intrinsics = [721.54, 721.54, 621, 187.5] # image resolution 1242, 375
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
batch_size = 4
crop_size = (320, 960)
train_pipeline = [
dict(type="LoadImagesFromFile"),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(type="RandomCrop", crop_size=crop_size),
dict(type="PhotoMetricDistortion"),
dict(type="Normalize", **img_norm_cfg),
dict(type="Pad", size=crop_size, pad_val=0, seg_pad_val=255, disp_pad_val=0),
dict(type="DefaultFormatBundleList"),
dict(
type="Collect",
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"img_norm_cfg",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
]
test_pipeline = [
dict(type='LoadImagesFromFile'),
dict(type="LoadRImagesFromFile"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp", is_reciprocal=False),
dict(type="LoadOpticalFlowAnnotations", imdecode_backend="kitti", key="flow"),
dict(type="LoadDispAnnotations", imdecode_backend="kitti", key="disp2", is_reciprocal=False),
dict(
type='MultiScaleFlipAug',
img_ratios=[1.0],
img_scale=None,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type="DefaultFormatBundleList"),
dict(type='Collect',
keys=["img", "r_img", "gt_disp", "gt_flow", "gt_disp2"],
meta_keys=[
"filename",
"ori_filename",
"ori_shape",
"img_shape",
"pad_shape",
"calib",
"disp_range",
"depth_range",
"intrinsics",
],
),
])
]
data = dict(
samples_per_gpu=batch_size,
workers_per_gpu=batch_size,
train=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=2,
intrinsics=intrinsics,
split=train_split,
pipeline=train_pipeline,
),
val=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=val_split,
pipeline=test_pipeline,
),
test=dict(
type=dataset_type,
disp_range=disp_range,
calib=calib,
depth_range=depth_range,
img_dir=data_root,
r_img_dir=data_root,
disp_dir=data_root,
flow_dir=data_root,
disp2_dir=data_root,
num_frames=-1,
intrinsics=intrinsics,
split=test_split,
pipeline=test_pipeline,
),
)
| CODD-main | configs/datasets/kitti_2015.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# model settings
max_disp = 320
iters = 1 # 16 for scene flow/KITTI, 1 for Sintel/TartanAir
motion_loss_weight = 1.0 # 0.5 for joint training tartan/KITTI, 1.0 for pretrain
freeze_stereo = True
freeze_motion = False
if freeze_stereo or freeze_motion:
find_unused_parameters = True
model = dict(
type='ConsistentOnlineDynamicDepth',
stereo=dict(
type='HITNetMF',
backbone=dict(
type='HITUNet',
),
initialization=dict(
type='TileInitialization',
max_disp=max_disp,
),
propagation=dict(
type='TilePropagation',
),
loss=dict(
type='HITLoss',
max_disp=max_disp,
alpha=0.9,
c=0.1,
),
),
motion=dict(
type="Motion",
iters=iters,
raft3d=dict(
type="RAFT3D",
cnet_cfg=dict(
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18_small'),
# when training from scratch, include this line to initialize the weights
type='HRNet',
norm_cfg=dict(type='SyncBN', requires_grad=False),
norm_eval=True,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(2,),
num_channels=(64,)),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(2, 2),
num_channels=(18, 36)),
stage3=dict(
num_modules=3,
num_branches=3,
block='BASIC',
num_blocks=(2, 2, 2),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=2,
num_branches=4,
block='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(18, 36, 72, 144))
)
)
),
loss=dict(
type='MotionLoss',
loss_weight=motion_loss_weight
),
),
train_cfg=dict(
freeze_stereo=freeze_stereo,
freeze_motion=freeze_motion,
),
test_cfg=dict(mode='whole')
)
| CODD-main | configs/models/stereo_motion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# model settings
max_disp = 320
iters = 16 # 16 for scene flow/KITTI, 1 for Sintel/TartanAir
motion_loss_weight = 0.5 # 0.5 for joint training tartan/KITTI, 1.0 for pretrain
fusion_loss_weight = 1.0
wr_weight = 1.0
wf_weight = 1.0
freeze_stereo = False
freeze_motion = False
freeze_fusion = False
if freeze_stereo or freeze_motion or freeze_fusion:
find_unused_parameters = True
model = dict(
type='ConsistentOnlineDynamicDepth',
stereo=dict(
type='HITNetMF',
backbone=dict(
type='HITUNet',
),
initialization=dict(
type='TileInitialization',
max_disp=max_disp,
),
propagation=dict(
type='TilePropagation',
),
loss=dict(
type='HITLoss',
max_disp=max_disp,
alpha=0.9,
c=0.1,
),
),
motion=dict(
type="Motion",
iters=iters,
raft3d=dict(
type="RAFT3D",
cnet_cfg=dict(
type='HRNet',
norm_cfg=dict(type='SyncBN', requires_grad=False),
norm_eval=True,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(2,),
num_channels=(64,)),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(2, 2),
num_channels=(18, 36)),
stage3=dict(
num_modules=3,
num_branches=3,
block='BASIC',
num_blocks=(2, 2, 2),
num_channels=(18, 36, 72)),
stage4=dict(
num_modules=2,
num_branches=4,
block='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(18, 36, 72, 144))
)
)
),
loss=dict(
type='MotionLoss',
loss_weight=motion_loss_weight
),
),
fusion=dict(
type="Fusion",
in_channels=24,
fusion_channel=32,
corr_cfg=dict(type='px2patch', patch_size=3),
loss=dict(
type='FusionLoss',
loss_weight=fusion_loss_weight,
min_disp=1,
max_disp=320,
wr_weight=wr_weight,
wf_weight=wf_weight
),
),
train_cfg=dict(
freeze_stereo=freeze_stereo,
freeze_motion=freeze_motion,
freeze_fusion=freeze_fusion,
),
test_cfg=dict(mode='whole')
)
| CODD-main | configs/models/codd.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# model settings
max_disp = 320
freeze_stereo = False
freeze_motion = True
freeze_fusion = True
if freeze_stereo or freeze_motion or freeze_fusion:
find_unused_parameters = True
model = dict(
type='ConsistentOnlineDynamicDepth',
stereo=dict(
type='HITNetMF',
backbone=dict(
type='HITUNet',
),
initialization=dict(
type='TileInitialization',
max_disp=max_disp,
),
propagation=dict(
type='TilePropagation',
),
loss=dict(
type='HITLoss',
max_disp=max_disp,
alpha=0.9,
c=0.1,
),
),
train_cfg=dict(
freeze_stereo=freeze_stereo,
freeze_motion=freeze_motion,
freeze_fusion=freeze_fusion,
),
test_cfg=dict(mode='whole')
)
| CODD-main | configs/models/stereo.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 100000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-4, weight_decay=0.00001)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-4,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=5000 // gpu_factor)
evaluation = dict(interval=5000 // gpu_factor, metric="default")
| CODD-main | configs/schedules/schedule_fusion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
optimizer = dict(type='Adam', lr=4e-4, betas=(0.9, 0.999))
optimizer_config = dict()
# learning policy
lr_config = dict(policy='MultiGamma', step=[225, 293, 315], gamma=[0.25, 0.4, 0.25])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=340) # Following HITNet
checkpoint_config = dict(by_epoch=True, interval=20)
evaluation = dict(interval=10, metric='default')
| CODD-main | configs/schedules/schedule_stereo.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 200000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-4, weight_decay=0.00001)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-4,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=10000 // gpu_factor)
evaluation = dict(interval=10000 // gpu_factor, metric="default")
| CODD-main | configs/schedules/schedule_motion.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 100000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-5, weight_decay=1e-6)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-5,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=10000 // gpu_factor)
evaluation = dict(interval=10000 // gpu_factor, metric="default")
| CODD-main | configs/schedules/schedule_motion_finetune.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 50000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-5, weight_decay=1e-6)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-5,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=5000 // gpu_factor)
evaluation = dict(interval=5000 // gpu_factor, metric="default")
| CODD-main | configs/schedules/schedule_fusion_finetune.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# optimizer
gpu_factor = 8
max_iter = 100000 // gpu_factor
optimizer = dict(type="Adam", lr=2e-5, weight_decay=1e-6)
optimizer_config = dict(grad_clip=dict(max_norm=1))
# learning policy
lr_config = dict(
policy="OneCycle",
max_lr=2e-5,
total_steps=max_iter,
pct_start=0.001,
anneal_strategy="linear"
)
# runtime settings
runner = dict(type="IterBasedRunner", max_iters=max_iter)
checkpoint_config = dict(by_epoch=False, interval=10000 // gpu_factor)
evaluation = dict(interval=10000 // gpu_factor, metric="default")
| CODD-main | configs/schedules/schedule_stereo_finetune.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import os.path as osp
from abc import ABCMeta
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
from mmcv.utils import mkdir_or_exist
from mmseg.models.builder import MODELS
from utils import AverageMeter, thres_metric, t_epe_metric, collect_metric, collect_gt, compute_valid_mask, \
compute_gt_disp_change, reset_meter, flow_warp
from .builder import ESTIMATORS
from .motion.raft3d.projective_ops import induced_flow
BF_DEFAULT = 1050 * 0.2 # baseline * focal length
@ESTIMATORS.register_module()
class ConsistentOnlineDynamicDepth(BaseModule, metaclass=ABCMeta):
"""Consistent online depth network"""
def __init__(
self,
stereo=None,
motion=None,
fusion=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
**kwargs,
):
super(ConsistentOnlineDynamicDepth, self).__init__(**kwargs)
self.fp16_enabled = False
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.build_model(stereo, motion, fusion)
def build_model(self, stereo, motion, fusion):
assert stereo is not None
self.stereo = MODELS.build(stereo)
if motion is not None:
self.motion = MODELS.build(motion)
else:
self.motion = None
if fusion is not None:
self.fusion = MODELS.build(fusion)
else:
self.fusion = None
def freeze_fusion(self):
if (self.train_cfg is not None) and (
self.train_cfg.get("freeze_fusion", False)
):
return True
else:
return False
def freeze_motion(self):
if (self.train_cfg is not None) and (
self.train_cfg.get("freeze_motion", False)
):
return True
else:
return False
def freeze_stereo(self):
if (self.train_cfg is not None) and (
self.train_cfg.get("freeze_stereo", False)
):
return True
else:
return False
def consistent_online_depth_estimation(self, left_img, right_img, img_metas, state):
"""network
Args:
left_img (Tensor)
right_img (Tensor)
img_metas (Tensor): dataset metas
state (dict): states storing past information
Returns:
dict: outputs
"""
if self.freeze_stereo() or not self.training:
with torch.no_grad():
outputs = self.stereo.stereo_matching(
left_img, right_img, img_metas, state
)
else:
outputs = self.stereo.stereo_matching(left_img, right_img, img_metas, state)
if self.motion is not None:
if self.freeze_motion() or not self.training:
with torch.no_grad():
self.motion(
state,
outputs,
img_metas=img_metas,
                        train_mode=not self.freeze_motion() and self.training,
)
else:
self.motion(
state,
outputs,
img_metas=img_metas,
                    train_mode=not self.freeze_motion() and self.training,
)
if self.fusion is not None:
if self.freeze_fusion() or not self.training:
with torch.no_grad():
self.fusion.memory_query(outputs, state, img_metas=img_metas)
self.fusion.memory_update(outputs, state, img_metas=img_metas)
else:
self.fusion.memory_query(outputs, state, img_metas=img_metas)
self.fusion.memory_update(outputs, state, img_metas=img_metas)
return outputs
@auto_fp16(apply_to=("img", "r_img"))
def forward(self, img, img_metas, return_loss=True, **kwargs):
"""Calls either :func:`forward_train` or :func:`forward_test` depending
on whether ``return_loss`` is ``True``.
Note this setting will change the expected inputs. When
``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]).
"""
if return_loss:
return self.forward_train(img, img_metas, **kwargs)
else:
return self.forward_test(img, img_metas, **kwargs)
def forward_train(
self,
l_img,
img_metas,
r_img,
gt_disp,
gt_semantic_seg=None,
gt_flow=None,
gt_disp_change=None,
gt_flow_occ=None,
gt_disp2=None,
**kwargs,
):
"""train step
Args:
l_img (Tensor): left image
img_metas (List): dataset meta
r_img (Tensor): right image
gt_disp (Tensor): Nx1xHxW
gt_semantic_seg (Tensor, optional): Nx1xHxW. Defaults to None.
gt_flow (Tensor, optional): Nx2xHxW. Defaults to None.
gt_disp_change (Tensor, optional): Nx1xHxW. Defaults to None.
gt_flow_occ (Tensor, optional): Nx1xHxW, occluded regions of flow, to be used to compute disparity change in TartanAir. Defaults to None.
gt_disp2 (Tensor, optional): disparity of next frame in current frame, to be used to compute disparity change in KITTI Depth. Defaults to None.
Returns:
dict: keys preceded with "loss_" will be summed for backpropagation
"""
state = dict(
pred_disp=[],
gt_disp=[],
mask_disp=[],
pred_disp_pyramid=[],
gt_flow=[],
gt_disp_change=[],
gt_flow_occ=[],
gt_disp2=[],
)
l_img_list = torch.unbind(l_img, dim=1)
r_img_list = torch.unbind(r_img, dim=1)
gt_disp_list = torch.unbind(gt_disp, dim=1)
if gt_flow is not None:
gt_flow_list = torch.unbind(gt_flow, dim=1)
else:
gt_flow_list = None
if gt_disp_change is not None:
gt_disp_change_list = torch.unbind(gt_disp_change, dim=1)
else:
gt_disp_change_list = None
if gt_flow_occ is not None:
gt_flow_occ_list = torch.unbind(gt_flow_occ, dim=1)
else:
gt_flow_occ_list = None
if gt_disp2 is not None:
gt_disp2_list = torch.unbind(gt_disp2, dim=1)
else:
gt_disp2_list = None
losses = dict()
for idx, (l_img, r_img, gt_disp) in enumerate(
zip(l_img_list, r_img_list, gt_disp_list)
):
if gt_flow_list is not None:
gt_flow = gt_flow_list[idx]
state["gt_flow"].append(gt_flow)
if gt_disp_change_list is not None:
gt_disp_change = gt_disp_change_list[idx]
state["gt_disp_change"].append(gt_disp_change)
if gt_flow_occ_list is not None:
gt_flow_occ = gt_flow_occ_list[idx] > 0
state["gt_flow_occ"].append(gt_flow_occ)
if gt_disp2_list is not None:
gt_disp2 = gt_disp2_list[idx]
state["gt_disp2"].append(gt_disp2)
# compute valid mask, save to states
mask_disp = compute_valid_mask(gt_disp, img_metas[0], gt_semantic_seg)
state["gt_disp"].append(gt_disp)
state["mask_disp"].append(mask_disp)
if torch.sum(mask_disp).item() == 0:
print("MASK_SUM", mask_disp.shape, torch.sum(mask_disp))
outputs = self.consistent_online_depth_estimation(l_img, r_img, img_metas, state)
loss = self.losses(outputs, gt_disp, mask_disp, idx, state, img_metas[0], gt_semantic_seg)
losses.update(loss)
return losses
def losses(
self, outputs, gt_disp, mask_disp, idx, state, meta, gt_semantic_seg=None
):
"""compute losses
Args:
outputs (List)
gt_disp (Tensor): Nx1xHxW
mask_disp (Tensor): Nx1xHxW, mask for disparity, True for valid
idx (int): frame index of the video sequence
state (dict): memory states of past information
meta (List): dataset meta
gt_semantic_seg (Tensor, optional): Nx1xHxW. Defaults to None.
Returns:
dict: losses
"""
pred_disp = outputs["pred_disp"]
state["pred_disp"].append(pred_disp)
loss = dict()
if not self.freeze_stereo():
self.stereo.losses(
loss, outputs, gt_disp, mask_disp, idx, gt_semantic_seg, meta
)
if idx >= 1:
if self.motion is not None and not self.freeze_motion() and self.motion.loss is not None:
self.motion.losses(loss, outputs, idx, state, meta)
if self.fusion is not None and not self.freeze_fusion() and self.fusion.loss is not None:
self.fusion.losses(loss, outputs, gt_disp, mask_disp, idx, state, meta)
return loss
def forward_test(self, img, img_metas, r_img=None, **kwargs):
"""
Args:
            img (List[Tensor]): The outer list is not used.
img_metas (List[List[dict]]): The outer list is not used.
The inner list indicates images in a batch.
"""
for var, name in [(img, "img"), (img_metas, "img_metas")]:
if not isinstance(var, list):
raise TypeError(f"{name} must be a list, but got " f"{type(var)}")
img = img[0]
r_img = r_img[0] if r_img is not None else r_img
img_meta = img_metas[0]
with torch.no_grad():
pred = self.inference(img, r_img, img_meta, **kwargs)
pred = [pred]
return pred
def inference(
self, img, r_img, img_meta, reciprocal=False, evaluate=True, **kwargs
):
"""inference
Args:
img (Tensor): left image
r_img (Tensor): right image
img_meta (List): dataset meta
            reciprocal (bool, optional): whether the prediction is depth; if True, use the "calib" key in meta to convert it to disparity. Defaults to False.
            evaluate (bool, optional): if True, evaluate against GT; if False, output disparity for visualization. Defaults to True.
Returns:
Tensor: The output disp prediction (evaluate=False) or metrics (evaluate=True)
"""
self.reset_inference_state()
l_img_list = torch.unbind(img, dim=1)
r_img_list = torch.unbind(r_img, dim=1)
B, MF, _, H, W = img.shape
(
gt_disp_list,
gt_flow_list,
gt_disp_change_list,
gt_flow_occ_list,
gt_disp2_list,
gt_disp_occ_list,
) = collect_gt(kwargs)
outputs = []
img_h, img_w = img_meta[0]["img_shape"][:2] # to remove padded region for eval
for idx, (l_img, r_img) in enumerate(zip(l_img_list, r_img_list)):
if gt_disp_list is not None:
gt_disp = gt_disp_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_disp"].append(gt_disp)
else:
gt_disp = None
if gt_flow_list is not None:
gt_flow = gt_flow_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_flow"].append(gt_flow)
if gt_disp_change_list is not None:
gt_disp_change = gt_disp_change_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_disp_change"].append(gt_disp_change)
if gt_flow_occ_list is not None:
gt_flow_occ = (
gt_flow_occ_list[idx] > 0
) # 0 for non-occ, True for occluded
self.inference_state["gt_flow_occ"].append(
gt_flow_occ[:, :, :img_h, :img_w]
)
if gt_disp_change_list is None and idx > 0:
gt_disp_change, _ = compute_gt_disp_change(
self.inference_state["gt_flow_occ"][idx - 1],
self.inference_state["gt_disp"][idx - 1],
self.inference_state["gt_disp"][idx],
self.inference_state["gt_flow"][idx - 1],
)
self.inference_state["gt_disp_change"].append(gt_disp_change)
if gt_disp2_list is not None:
gt_disp2 = gt_disp2_list[idx][:, :, :img_h, :img_w]
self.inference_state["gt_disp2"].append(gt_disp2)
if gt_disp_change_list is None:
gt_disp_change = gt_disp2 - gt_disp
gt_disp_change[gt_disp2 <= 0.0] = BF_DEFAULT
gt_disp_change[gt_disp <= 0.0] = BF_DEFAULT
self.inference_state["gt_disp_change"].append(gt_disp_change)
if gt_disp_occ_list is not None:
# True for non-occluded to comply with semantic seg
gt_disp_occ = (gt_disp_occ_list[idx] <= 0)[:, :, :img_h, :img_w]
else:
gt_disp_occ = None
output = self.consistent_online_depth_estimation(
l_img, r_img, img_meta, self.inference_state
)
pred_disp = output["pred_disp"]
# for stereo depth model
if reciprocal:
pred_disp = img_meta[0]["calib"] / pred_disp
# save prediction (uncropped for temporal model)
self.inference_state["pred_disp"].append(pred_disp)
# crop for evaluation
pred_disp = pred_disp[:, :, :img_h, :img_w]
outputs.append(pred_disp)
# perform evaluation if needed
if evaluate:
gt_disp = self.inference_state.get('gt_disp', None)
assert gt_disp is not None, "No ground truth provided"
gt_disp = gt_disp[-1]
# import matplotlib.pyplot as plt
# plt.imshow(gt_disp.squeeze().cpu())
# plt.show()
self.calc_metric(idx, pred_disp, gt_disp, img_meta[0], img_h, img_w, gt_semantic_seg=gt_disp_occ,
Ts=output.get("Ts", None))
if evaluate: # return evaluated metrics
outputs = collect_metric(self.inference_state)
else: # otherwise, return disp map
outputs = torch.cat(outputs, dim=1)
assert len(outputs.shape) == 4, "Output shape is wrong"
return outputs
def reset_inference_state(self):
"""reset inference states when new sequence starts"""
self.inference_state = OrderedDict(
pred_disp=[],
gt_disp=[],
mask_disp=[],
gt_flow=[],
gt_disp_change=[],
gt_flow_occ=[],
gt_disp2=[],
)
# disp metric
self.inference_state["epe_meter"] = AverageMeter()
self.inference_state["th3_meter"] = AverageMeter()
# temporal metric
self.inference_state["tepe_meter"] = AverageMeter()
self.inference_state["th3_tepe_meter"] = AverageMeter()
self.inference_state["tepe_rel_meter"] = AverageMeter()
self.inference_state["th1_tepe_rel_meter"] = AverageMeter()
# magnitude of flow
self.inference_state["flow_mag_meter"] = AverageMeter()
# 3D metric
self.inference_state["count_all"] = 0.0
self.inference_state["epe2d_scene_flow_all"] = 0.0
self.inference_state["epe2d_optical_flow_all"] = 0.0
self.inference_state["1px_scene_flow_all"] = 0.0
self.inference_state["1px_optical_flow_all"] = 0.0
reset_meter(self.inference_state)
def calc_metric(
self,
idx,
pred_disp,
gt_disp,
meta,
h,
w,
gt_semantic_seg=None,
Ts=None,
):
"""evaluate reuslts
Args:
idx (int): frame idx
pred_disp (Tensor): Nx1xHxW
gt_disp (Tensor): Nx1xHxW
meta (dict): dataset meta
h (int): original image height
w (int): original image width
gt_semantic_seg (Tensor, optional): Nx2xHxW. Defaults to None.
Ts (Tensor, optional): NxHxW. Defaults to None.
"""
mask_disp = compute_valid_mask(
gt_disp, meta, gt_semantic_seg=gt_semantic_seg
) # mask excludes invalid disp
self.inference_state["mask_disp"].append(mask_disp)
if mask_disp.any(): # only compute metrics if there are valid pixels
# compute metrics
self.inference_state["epe_meter"].update(
torch.mean(torch.abs(pred_disp[mask_disp] - gt_disp[mask_disp])).item()
)
self.inference_state["th3_meter"].update(
thres_metric(pred_disp, gt_disp, mask_disp, 3.0).item()
)
# temporal metrics
if idx > 0:
# use previous flow to warp current estimation to previous frame
flow = self.inference_state["gt_flow"][-2]
gt_disp_prev = self.inference_state["gt_disp"][-2]
pred_disp_prev = self.inference_state["pred_disp"][-2][:, :, :h, :w] # crop for evaluation
if torch.any(gt_disp > 0.0):
mask = compute_valid_mask(
gt_disp, meta, gt_flow_prev=flow, gt_semantic_seg=gt_semantic_seg
) # mask excludes invalid flow
else: # in kitti, only disp in one frame is provided, so we input dummy gt_disps
mask = compute_valid_mask(
torch.ones_like(gt_disp, device=gt_disp.device) * BF_DEFAULT / 2.0, meta, gt_flow_prev=flow,
gt_semantic_seg=gt_semantic_seg
) # mask excludes invalid flow
to_warp = torch.cat([gt_disp, pred_disp, mask.float()], dim=1)
to_warp, valid = flow_warp(
to_warp, flow, padding_mode="zeros", mode="nearest"
)
warped_gt_disp, warped_pred_disp, mask_warp = torch.unbind(to_warp, dim=1)
warped_gt_disp, warped_pred_disp = warped_gt_disp.unsqueeze(1), warped_pred_disp.unsqueeze(1) # N1HW
mask_curr = (valid.squeeze()[0] & mask_warp.bool() & mask) # excludes flow occ
if len(self.inference_state["gt_disp2"]) > 0: # if gt provides disp2, use provided
warped_gt_disp = self.inference_state["gt_disp2"][-2]
mask_curr &= warped_gt_disp > 0.0
mask_prev = self.inference_state["mask_disp"][-2] # prev mask only excludes invalid disp
# only compute metrics if there are valid pixels
if mask_prev.any() and mask_curr.any():
disp_tepe, disp_tepe_rel = t_epe_metric(warped_pred_disp, warped_gt_disp, pred_disp_prev, gt_disp_prev,
mask_prev, mask_curr)
self.inference_state["tepe_meter"].update(disp_tepe.mean().item())
self.inference_state["tepe_rel_meter"].update(
disp_tepe_rel.mean().item()
)
self.inference_state["th1_tepe_rel_meter"].update(
(disp_tepe_rel > 1.0).float().mean().item()
)
self.inference_state["th3_tepe_meter"].update(
(disp_tepe > 3.0).float().mean().item()
)
mag = torch.sum(flow ** 2, dim=1).sqrt().squeeze()
self.inference_state["flow_mag_meter"].update(mag.mean().item())
# motion metrics
if Ts is not None and len(self.inference_state["gt_disp_change"]) > 0:
if len(self.inference_state["gt_flow_occ"]) > 0:
# in this case, disp change computed from flow
gt_disp_change = self.inference_state["gt_disp_change"][-1]
mask = compute_valid_mask(gt_disp_prev, meta, gt_flow_prev=flow, gt_disp_change=gt_disp_change,
gt_semantic_seg=gt_semantic_seg) # excludes invalid disp change
gt_flow_occ = self.inference_state["gt_flow_occ"][-2]
mask[gt_flow_occ] = False # excludes flow occ since disp change is computed from flow
else: # otherwise, gt disp change provided
gt_disp_change = self.inference_state["gt_disp_change"][-2]
mask = compute_valid_mask(
gt_disp_prev,
meta,
gt_flow_prev=flow,
gt_disp_change=gt_disp_change,
gt_semantic_seg=gt_semantic_seg,
) # excludes invalid disp change
if mask.any(): # only compute metrics if there are valid pixels
flow = flow.permute(0, 2, 3, 1).squeeze() # HW2
# use transformation field to extract 2D and 3D flow
B = pred_disp.shape[0]
intrinsics = meta["intrinsics"]
intrinsics = torch.tensor(intrinsics).to(pred_disp.device).unsqueeze(0).expand(B, -1)
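# recover depth from the previous predicted disparity: depth = BF_DEFAULT / disp
# (BF_DEFAULT acting as baseline * focal length), clipped to a valid range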
depth1 = BF_DEFAULT / pred_disp_prev
depth1 = torch.clip(depth1, max=BF_DEFAULT, min=0).squeeze(1)
flow2d_est, _, _ = induced_flow(
Ts[:, :h, :w], depth1, intrinsics
)
flow2d_est[..., -1] = (
flow2d_est[..., -1] * BF_DEFAULT
) # induced_flow returns inverse depth in the last channel; multiplying by BF converts it to disparity
flow2d = torch.cat(
[flow, gt_disp_change.squeeze()[..., None]], dim=-1
) # HW3
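# flow2d_est: optical flow + disparity change induced by the estimated motion field Ts;
# flow2d: ground-truth optical flow + ground-truth disparity change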
epe2d_scene_flow = torch.sum((flow2d_est - flow2d) ** 2, -1).sqrt()
epe2d_optical_flow = torch.sum(
((flow2d_est - flow2d) ** 2)[..., :2], -1
).sqrt()
# our evaluation (use all valid pixels)
epe2d_scene_flow = epe2d_scene_flow.squeeze()[mask.squeeze()].float()
epe2d_optical_flow_all = epe2d_optical_flow.squeeze()[mask.squeeze()].float()
self.inference_state["count_all"] += epe2d_scene_flow.reshape(-1).shape[0]
self.inference_state["epe2d_scene_flow_all"] += epe2d_scene_flow.sum()
self.inference_state["epe2d_optical_flow_all"] += epe2d_optical_flow_all.sum()
self.inference_state["1px_scene_flow_all"] += torch.sum(
epe2d_scene_flow < 1.0
)
self.inference_state["1px_optical_flow_all"] += torch.sum(
epe2d_optical_flow_all < 1.0
)
def show_result(
self, filename, result, show=False, out_file=None, running_stats=None, **kwargs
):
"""show result either to terminal or save output
Args:
filename (str)
result (Tensor): disparity or metrics
show (bool, optional): if show, output disparity. Defaults to False.
out_file (str, optional): output filename. Defaults to None.
running_stats (optional): running stats to accumulate results. Defaults to None.
"""
if not show:
if running_stats:
result = result[0]
if running_stats.header is None:
running_stats.header = ["filename"] + [k for k in result.keys()]
running_stats.push(filename, [result[k].cpu().item() for k in result.keys()])
else:
disp = result[0].cpu().numpy()
mkdir_or_exist(osp.dirname(out_file))
with open(out_file.replace(osp.splitext(out_file)[1], ".disp.pred.npz"), "wb") as f:
np.savez_compressed(f, disp=disp)
def train(self, mode=True):
"""overloading torch's train function to freeze different modules when necessary
Args:
mode (bool, optional): True to train, False to eval. Defaults to True.
"""
self.training = mode
for module in self.children():
module.train(mode)
if mode is False:
return
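# in train mode, optionally keep the stereo / motion / fusion sub-networks frozen as configured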
if self.freeze_stereo() and self.stereo is not None:
self.stereo.freeze()
if self.freeze_motion() and self.motion is not None:
self.motion.freeze()
if self.freeze_fusion() and self.fusion is not None:
self.fusion.freeze()
if mode:
n_parameters = sum(
p.numel() for n, p in self.named_parameters() if p.requires_grad
)
print(
"PARAM STATUS: total number of training parameters %.3fM"
% (n_parameters / 1000 ** 2)
)
def train_step(self, data_batch, optimizer, **kwargs):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating is also defined in
this method, such as GAN.
Args:
data_batch (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: It should contain at least 3 keys: ``loss``, ``log_vars``,
``num_samples``.
``loss`` is a tensor for back propagation, which can be a
weighted sum of multiple losses.
``log_vars`` contains all the variables to be sent to the
logger.
``num_samples`` indicates the batch size (when the model is
DDP, it means the batch size on each GPU), which is used for
averaging the logs.
"""
losses = self(**data_batch)
loss, log_vars = self._parse_losses(losses)
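# surface any train_epe_* diagnostics recorded on the model during the forward pass
# so they show up in the training logs alongside the losses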
train_epe_attrs = [attr for attr in dir(self) if "train_epe" in attr]
for attr in train_epe_attrs:
log_vars.update({attr: getattr(self, attr)})
outputs = dict(
loss=loss,
log_vars=log_vars,
num_samples=len(data_batch["img"].data),
)
return outputs
def val_step(self, data_batch, **kwargs):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
output = self(**data_batch, **kwargs)
return output
@staticmethod
def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network.
Args:
losses (dict): Raw output of the network, which usually contain
losses and other necessary information.
Returns:
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
which may be a weighted sum of all losses, log_vars contains
all the variables to be sent to the logger.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
elif isinstance(loss_value, dict):
for k, v in loss_value.items():
log_vars[loss_name + "_" + k] = v
else:
raise TypeError(f"{loss_name} is not a tensor, a list of tensors, or a dict of tensors")
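# the total loss is the sum of all logged entries whose key starts with "loss"
# (including decode-head entries such as "decode.loss_*")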
loss = sum(
_value
for _key, _value in log_vars.items()
if _key.startswith("loss") or (_key.startswith("decode") and "loss" in _key)
)
log_vars["loss"] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
| CODD-main | model/codd.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
from .builder import *
from .codd import ConsistentOnlineDynamicDepth
from .fusion import *
from .losses import *
from .motion import *
from .stereo import *
from .lr_updater import *
__all__ = ["build_estimator"]
| CODD-main | model/__init__.py |