python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset convert tool for KITTI dataset."""
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.datasets.kitti.kitti_dataset import create_kitti_infos
import argparse
from easydict import EasyDict
from pathlib import Path
import yaml
from nvidia_tao_pytorch.core.path_utils import expand_path
def parse_args(args=None):
"""Argument Parser."""
parser = argparse.ArgumentParser(description="KITTI dataset converter.")
parser.add_argument("--config_file", "-c", type=str, help="Dataset config file.")
parser.add_argument("--names", "-n", type=str, help="Class names.")
parser.add_argument("--data_path", "-d", type=str, help="KITTI data path.")
parser.add_argument("--save_path", "-s", type=str, help="Output path.")
return parser.parse_known_args(args)[0]
if __name__ == "__main__":
args = parse_args()
config_file = expand_path(args.config_file)
data_path = expand_path(args.data_path)
save_path = expand_path(args.save_path)
with open(config_file) as f:
dataset_cfg = EasyDict(yaml.load(f, Loader=yaml.FullLoader))
create_kitti_infos(
dataset_cfg=dataset_cfg,
class_names=args.names.strip().split(','),
data_path=Path(data_path),
save_path=Path(save_path)
)
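# Illustrative invocation (hypothetical paths and class names, not part of the
# original script); the same arguments can also be passed programmatically to
# parse_args() for testing:
#
#     args = parse_args([
#         "-c", "kitti_dataset.yaml",
#         "-n", "Car,Pedestrian,Cyclist",
#         "-d", "/path/to/kitti",
#         "-s", "/path/to/kitti",
#     ])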
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/scripts/dataset_convert_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drop a class in dataset."""
import os
import sys
from nvidia_tao_pytorch.core.path_utils import expand_path
def drop_class(label_dir, classes):
"""drop label by class names."""
labels = []
if os.path.isdir(label_dir):
labels = os.listdir(label_dir)
labels = [os.path.join(label_dir, x) for x in labels]
for gt in labels:
print("Processing ", gt)
with open(gt) as f:
lines = f.readlines()
lines_ret = []
for line in lines:
ls = line.strip()
line = ls.split()
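# KITTI label files are whitespace-separated; the first field is the object class name.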
if line[0] in classes:
print("Dropping ", line[0])
continue
else:
lines_ret.append(ls)
with open(gt, "w") as fo:
out = '\n'.join(lines_ret)
fo.write(out)
if __name__ == "__main__":
label_dir = expand_path(sys.argv[1])
drop_class(label_dir, sys.argv[2].split(','))
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/scripts/drop_class.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate LIDAR labels."""
import os
import argparse
import numpy as np
from tqdm import tqdm
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils.object3d_kitti import (
get_objects_from_label
)
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils.calibration_kitti import (
Calibration
)
def parse_args():
"""Argument Parser."""
parser = argparse.ArgumentParser("Convert camera label to LiDAR label.")
parser.add_argument(
"-l", "--label_dir",
type=str, required=True,
help="Camera label directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR label directory"
)
return parser.parse_args()
def generate_lidar_labels(label_dir, calib_dir, output_dir):
"""Generate LiDAR labels from KITTI Camera labels."""
if os.path.isdir(label_dir):
for lab in tqdm(os.listdir(label_dir)):
lab_file = os.path.join(label_dir, lab)
obj_list = get_objects_from_label(lab_file)
calib_file = os.path.join(calib_dir, lab)
calib = Calibration(calib_file)
loc = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
loc_lidar = calib.rect_to_lidar(loc)
# update obj3d.loc
with open(os.path.join(output_dir, lab), "w") as lf:
for idx, lc in enumerate(loc_lidar):
# bottom center to 3D center
obj_list[idx].loc = (lc + np.array([0., 0., obj_list[idx].h / 2.]))
# rotation_y to rotation_z
obj_list[idx].ry = -np.pi / 2. - obj_list[idx].ry
lf.write(obj_list[idx].to_kitti_format())
lf.write('\n')
if __name__ == "__main__":
args = parse_args()
label_dir = expand_path(args.label_dir)
calib_dir = expand_path(args.calib_dir)
output_dir = expand_path(args.output_dir)
generate_lidar_labels(label_dir, calib_dir, output_dir)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/scripts/gen_lidar_labels.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Get point cloud ranges."""
import os
import sys
import numpy as np
from nvidia_tao_pytorch.core.path_utils import expand_path
def calculate_pc_ranges(pc_path):
"""Get pointcloud data xyz ranges."""
if os.path.isdir(pc_path):
for idx, f in enumerate(os.listdir(pc_path)):
pc_file = os.path.join(pc_path, f)
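# KITTI LiDAR .bin files store float32 points as (x, y, z, intensity) rows.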
xyz = np.fromfile(pc_file, dtype=np.float32).reshape(-1, 4)
if idx == 0:
x_min = np.amin(xyz[:, 0])
x_max = np.amax(xyz[:, 0])
y_min = np.amin(xyz[:, 1])
y_max = np.amax(xyz[:, 1])
z_min = np.amin(xyz[:, 2])
z_max = np.amax(xyz[:, 2])
else:
x_min = min(np.amin(xyz[:, 0]), x_min)
x_max = max(np.amax(xyz[:, 0]), x_max)
y_min = min(np.amin(xyz[:, 1]), y_min)
y_max = max(np.amax(xyz[:, 1]), y_max)
z_min = min(np.amin(xyz[:, 2]), z_min)
z_max = max(np.amax(xyz[:, 2]), z_max)
print("Pointcloud ranges: ", x_min, y_min, z_min, x_max, y_max, z_max)
if __name__ == "__main__":
pc_path = expand_path(sys.argv[1])
calculate_pc_ranges(pc_path)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/scripts/get_pc_ranges.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filter labels by LIDAR."""
import os
import sys
from nvidia_tao_pytorch.core.path_utils import expand_path
def filter_labels(label_dir, lidar_dir, val_label_dir):
"""filter labels by lidar."""
labels, lidars = [], []
if os.path.isdir(label_dir):
labels = os.listdir(label_dir)
if os.path.isdir(lidar_dir):
lidars = os.listdir(lidar_dir)
for label in labels:
lidar = label[:-4] + ".bin"
if lidar not in lidars:
print("Moving to ", os.path.join(val_label_dir, label))
os.rename(os.path.join(label_dir, label), os.path.join(val_label_dir, label))
if __name__ == "__main__":
label_dir = expand_path(sys.argv[1])
lidar_dir = expand_path(sys.argv[2])
val_label_dir = expand_path(sys.argv[3])
filter_labels(label_dir, lidar_dir, val_label_dir)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/scripts/filter_labels.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Split KITTI datset into train/val."""
import os
import sys
from nvidia_tao_pytorch.core.path_utils import expand_path
def split(list_file, lidar, label, output_lidar, output_label):
"""train/val split of the KITTI dataset."""
with open(list_file) as lf:
file_names = lf.readlines()
file_names = [f.strip() for f in file_names]
if os.path.isdir(lidar):
for li in os.listdir(lidar):
if li[:-4] in file_names:
os.rename(os.path.join(lidar, li), os.path.join(output_lidar, li))
if os.path.isdir(label):
for la in os.listdir(label):
if la[:-4] in file_names:
os.rename(os.path.join(label, la), os.path.join(output_label, la))
if __name__ == "__main__":
list_file = expand_path(sys.argv[1])
lidar = expand_path(sys.argv[2])
label = expand_path(sys.argv[3])
output_lidar = expand_path(sys.argv[4])
output_label = expand_path(sys.argv[5])
split(list_file, lidar, label, output_lidar, output_label)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/scripts/kitti_split.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate LIDAR points."""
import os
import argparse
import numpy as np
from skimage import io
from tqdm import tqdm
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils.calibration_kitti import (
Calibration
)
def parse_args():
"""Argument Parser."""
parser = argparse.ArgumentParser("Limit LIDAR points to FOV range.")
parser.add_argument(
"-p", "--points_dir",
type=str, required=True,
help="LIDAR points directory."
)
parser.add_argument(
"-c", "--calib_dir",
type=str, required=True,
help="Calibration file directory"
)
parser.add_argument(
"-o", "--output_dir",
type=str, required=True,
help="Output LiDAR points directory"
)
parser.add_argument(
"-i",
"--image_dir",
type=str, required=True,
help="image directory"
)
return parser.parse_args()
def get_fov_flag(pts_rect, img_shape, calib):
"""Get FOV flags."""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def generate_lidar_points(points_dir, calib_dir, output_dir, image_dir):
"""Limit LiDAR points to FOV range."""
if os.path.isdir(points_dir):
for pts in tqdm(os.listdir(points_dir)):
pts_file = os.path.join(points_dir, pts)
points = np.fromfile(pts_file, dtype=np.float32).reshape(-1, 4)
calib_file = os.path.join(calib_dir, pts[:-4] + ".txt")
calib = Calibration(calib_file)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
img_file = os.path.join(image_dir, pts[:-4] + ".png")
img_shape = np.array(io.imread(img_file).shape[:2], dtype=np.int32)
fov_flag = get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
points.tofile(os.path.join(output_dir, pts))
# double check
points_cp = np.fromfile(os.path.join(output_dir, pts), dtype=np.float32).reshape(-1, 4)
assert np.equal(points, points_cp).all()
else:
raise NotADirectoryError("LiDAR points directory does not exist")
if __name__ == "__main__":
args = parse_args()
points_dir = expand_path(args.points_dir)
calib_dir = expand_path(args.calib_dir)
output_dir = expand_path(args.output_dir)
image_dir = expand_path(args.image_dir)
generate_lidar_points(points_dir, calib_dir, output_dir, image_dir)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/scripts/gen_lidar_points.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorRT inference model builder for PointPillars."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from io import open # Python 2/3 compatibility. pylint: disable=W0622
import logging
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=W0611
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.export.tensorrt import (
_create_tensorrt_logger
)
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
BINDING_TO_DTYPE = {
"points": np.float32,
"num_points": np.int32,
"output_boxes": np.float32,
"num_boxes": np.int32,
}
class CacheCalibrator(trt.IInt8EntropyCalibrator2):
"""Calibrator class that loads a cache file directly.
This inherits from ``trt.IInt8EntropyCalibrator2`` to implement
the calibration interface that TensorRT needs to calibrate the
INT8 quantization factors.
Args:
cache_filename (str): name of the calibration cache file to read from.
"""
def __init__(self, cache_filename, *args, **kwargs):
"""Init routine."""
super(CacheCalibrator, self).__init__(*args, **kwargs)
self._cache_filename = cache_filename
def get_batch(self, names):
"""Dummy method since we are going to use cache file directly.
Args:
names (list): list of memory bindings names.
"""
return None
def get_batch_size(self):
"""Return batch size."""
return 8
def read_calibration_cache(self):
"""Read calibration from file."""
if os.path.exists(self._cache_filename):
with open(self._cache_filename, "rb") as f:
return f.read()
else:
raise ValueError("""Calibration cache file
not found: {}""".format(self._cache_filename))
def write_calibration_cache(self, cache):
"""Do nothing since we already have cache file.
Args:
cache (memoryview): buffer to read calibration data from.
"""
return
class Engine(object):
"""A class to represent a TensorRT engine.
This class provides utility functions for performing inference on
a TensorRT engine.
Args:
engine: the CUDA engine to wrap.
"""
def __init__(self, engine, batch_size):
"""Initialization routine."""
self._engine = engine
self._context = None
self._batch_size = batch_size
self._actual_batch_size = batch_size
@contextlib.contextmanager
def _create_context(self):
"""Create an execution context and allocate input/output buffers."""
try:
with self._engine.create_execution_context() as self._context:
self._device_buffers = []
self._host_buffers = []
self._input_binding_ids = {}
self.points_batch_size = self._context.get_binding_shape(0)[1]
for i in range(self._engine.num_bindings):
tensor_shape = self._engine.get_binding_shape(i)
elt_count = trt.volume(tensor_shape)
binding_name = self._engine.get_binding_name(i)
dtype = BINDING_TO_DTYPE[binding_name]
if self._engine.binding_is_input(i):
self._input_binding_ids[binding_name] = i
page_locked_mem = None
else:
page_locked_mem = cuda.pagelocked_empty(elt_count, dtype=dtype)
page_locked_mem = page_locked_mem.reshape(*tensor_shape)
# Allocate memory.
self._host_buffers.append(page_locked_mem)
_mem_alloced = cuda.mem_alloc(elt_count * np.dtype(dtype).itemsize)
self._device_buffers.append(_mem_alloced)
if not self._input_binding_ids:
raise RuntimeError("No input bindings detected.")
# Create stream and events to measure timings.
self._stream = cuda.Stream()
self._start = cuda.Event()
self._end = cuda.Event()
yield
finally:
# Release context and allocated memory.
self._release_context()
def _do_infer(self, batch):
# make sure it is contiguous array
bindings = [int(device_buffer) for device_buffer in self._device_buffers]
# Transfer input data to device.
for node_name, array in batch.items():
if node_name == "points":
if isinstance(array, list):
# Convert list to array
array_concat = []
for ar in array:
if ar.shape[0] > self.points_batch_size:
raise ValueError(
f"Input LIDAR file has points number: {ar.shape[0]} larger than "
f"the one specified in ONNX model: {self.points_batch_size}, please set "
"cfg.model.inference.max_points_num to a larger "
"value and re-export to ONNX model and TensorRT "
"engine"
)
array_concat.append(
np.pad(ar, ((0, self.points_batch_size - ar.shape[0]), (0, 0)))
)
array = np.stack(array_concat, axis=0)
if len(array.shape) == 2:
array = np.expand_dims(array, axis=0)
if array.shape[1] > self.points_batch_size:
raise ValueError(
f"Input LIDAR file has points number: {array.shape[1]} larger than "
f"the one specified in ONNX model: {self.points_batch_size}, please set "
"cfg.model.inference.max_points_num to a larger "
"value and re-export to ONNX model and TensorRT "
"engine"
)
if array.shape[1] < self.points_batch_size:
array = np.pad(
array,
((0, 0), (0, self.points_batch_size - array.shape[1]), (0, 0)),
constant_values=0.
)
# The last batch can be smaller
if array.shape[0] < self._batch_size:
self._actual_batch_size = array.shape[0]
delta_batch = self._batch_size - array.shape[0]
pad_array = np.repeat(array[0:1, ...], delta_batch, axis=0)
array = np.concatenate([array, pad_array], axis=0)
elif node_name == "num_points":
if isinstance(array, list):
array = np.stack(array)
if array.shape[0] < self._batch_size:
self._actual_batch_size = array.shape[0]
delta_batch = self._batch_size - array.shape[0]
pad_array = np.repeat(array[0:1], delta_batch, axis=0)
array = np.concatenate([array, pad_array], axis=0)
array = array.astype("int32")
else:
raise KeyError(f"Unknown input data: {node_name}")
array = np.ascontiguousarray(array)
cuda.memcpy_htod_async(
self._device_buffers[self._input_binding_ids[node_name]],
array,
self._stream
)
# Execute model.
self._start.record(self._stream)
self._context.execute_async_v2(bindings, self._stream.handle, None)
self._end.record(self._stream)
self._end.synchronize()
# Transfer predictions back.
outputs = dict()
for i in range(self._engine.num_bindings):
if not self._engine.binding_is_input(i):
cuda.memcpy_dtoh_async(self._host_buffers[i], self._device_buffers[i],
self._stream)
out = np.copy(self._host_buffers[i][:self._actual_batch_size])
name = self._engine.get_binding_name(i)
outputs[name] = out
return outputs
def _release_context(self):
"""Release context and allocated memory."""
for device_buffer in self._device_buffers:
device_buffer.free()
del (device_buffer)
for host_buffer in self._host_buffers:
del (host_buffer)
del (self._start)
del (self._end)
del (self._stream)
def infer(self, batch):
"""Perform inference on a Numpy array.
Args:
batch (ndarray): array to perform inference on.
Returns:
A dictionary of outputs where keys are output names
and values are output tensors.
"""
with self._create_context():
outputs = self._do_infer(batch)
return outputs
def infer_iterator(self, iterator):
"""Perform inference on an iterator of Numpy arrays.
This method should be preferred to ``infer`` when performing
inference on multiple Numpy arrays since this will re-use
the allocated execution and memory.
Args:
iterator: an iterator that yields Numpy arrays.
Yields:
A dictionary of outputs where keys are output names
and values are output tensors, for each array returned
by the iterator.
Returns:
None.
"""
with self._create_context():
for batch in iterator:
outputs = self._do_infer(batch)
yield outputs
def save(self, filename):
"""Save serialized engine into specified file.
Args:
filename (str): name of file to save engine to.
"""
with open(filename, "wb") as outf:
outf.write(self._engine.serialize())
class TrtModel(object):
"""A TensorRT model builder for FasterRCNN model inference based on TensorRT.
The TensorRT model builder builds a TensorRT engine from the engine file from the
tlt-converter and do inference in TensorRT. We use this as a way to verify the
TensorRT inference functionality of the FasterRCNN model.
"""
def __init__(self,
trt_engine_file,
batch_size):
"""Initialize the TensorRT model builder."""
self._trt_engine_file = trt_engine_file
self._batch_size = batch_size
self._trt_logger = _create_tensorrt_logger()
trt.init_libnvinfer_plugins(self._trt_logger, "")
def set_engine(self, trt_engine):
"""Set engine."""
self.engine = Engine(trt_engine,
self._batch_size)
def load_trt_engine_file(self):
"""load TensorRT engine file generated by tlt-converter."""
runtime = trt.Runtime(self._trt_logger)
with open(self._trt_engine_file, 'rb') as f:
_engine = f.read()
logger.info("Loading existing TensorRT engine and "
"ignoring the specified batch size and data type"
" information in spec file.")
self.engine = Engine(runtime.deserialize_cuda_engine(_engine),
self._batch_size)
def build_or_load_trt_engine(self):
"""Build engine or load engine depends on whether a trt engine is available."""
if self._trt_engine_file is not None:
# load engine
logger.info("""Loading TensorRT engine file: {}
for inference.""".format(self._trt_engine_file))
self.load_trt_engine_file()
else:
raise ValueError("""A TensorRT engine file should
be provided for TensorRT based inference.""")
def predict(self, batch):
"""Do inference with TensorRT engine."""
return self.engine.infer(batch)
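# Illustrative usage (hypothetical engine path and point counts, not part of
# the original file):
#
#     model = TrtModel("pointpillars.engine", batch_size=1)
#     model.build_or_load_trt_engine()
#     batch = {
#         "points": np.zeros((1, 204800, 4), dtype=np.float32),
#         "num_points": np.array([204800], dtype=np.int32),
#     }
#     outputs = model.predict(batch)  # keys: "output_boxes", "num_boxes"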
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/export/tensorrt_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export tools for PointPillars."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ONNX model simplifications."""
import numpy as np
import onnx
import onnx_graphsurgeon as gs
@gs.Graph.register()
def replace_with_scatter(self, inputs, outputs):
"""Insert Scatter plugin."""
# Disconnect output nodes of all input tensors
dense_shape = outputs[0].shape[2:4]
for inp in inputs:
inp.outputs.clear()
# Disconnect input nodes of all output tensors
for out in outputs:
out.inputs.clear()
# Insert the new node.
attrs = {"dense_shape": dense_shape}
return self.layer(
op="PillarScatterPlugin",
name="PillarScatterPlugin_0",
inputs=inputs,
outputs=outputs,
attrs=attrs
)
def recursive_set_shape(node):
"""Recursively set shape."""
for ot in node.outputs:
ot.shape = tuple(["batch"] + list(ot.shape[1:]))
for on in ot.outputs:
recursive_set_shape(on)
def simplify_onnx(onnx_model, cfg):
"""Simplify ONNX model."""
graph = gs.import_onnx(onnx_model)
tmap = graph.tensors()
MAX_VOXELS = tmap["input"].shape[1]
MAX_POINTS = tmap["input"].shape[2]
# (point_feats, cluster, center)
NUM_FEATS = tmap["input"].shape[3] + 3 + 3
input_new = gs.Variable(name="input", dtype=np.float32, shape=("batch", MAX_VOXELS, MAX_POINTS, NUM_FEATS))
X = gs.Variable(name="coords_", dtype=np.int32, shape=("batch", MAX_VOXELS, 4))
Y = gs.Variable(name="params", dtype=np.int32, shape=("batch",))
first_node_after_pillarscatter = [node for node in graph.nodes if node.op == "Conv"][0]
first_node_pillarvfe = [node for node in graph.nodes if node.op == "MatMul"][0]
first_node_pillarvfe = first_node_pillarvfe.i()
current_node = first_node_pillarvfe
for _ in range(7):
current_node = current_node.o()
last_node_pillarvfe = current_node
# Merge the scatter layers between the inputs and outputs below into a single plugin node
graph.inputs.append(Y)
inputs = [last_node_pillarvfe.outputs[0], X, Y]
outputs = [first_node_after_pillarscatter.inputs[0]]
graph.replace_with_scatter(inputs, outputs)
# Remove the now-dangling subgraph.
graph.cleanup().toposort()
# Keep only the subgraph between the new inputs and outputs below
graph.inputs = [first_node_pillarvfe.inputs[0], X, Y]
graph.outputs = [tmap["cls_preds"], tmap["box_preds"], tmap["dir_cls_preds"]]
# Notice that we do not need to manually modify the rest of the graph. ONNX GraphSurgeon will
# take care of removing any unnecessary nodes or tensors, so that we are left with only the subgraph.
graph.cleanup()
graph.inputs = [input_new, X, Y]
first_add = [node for node in graph.nodes if node.op == "MatMul"][0]
first_add = first_add.i()
first_add.inputs[0] = input_new
graph.cleanup().toposort()
scatter_node = [n for n in graph.nodes if n.op == "PillarScatterPlugin"][0]
lidar_point_features = cfg.dataset.data_augmentor.aug_config_list[0].num_point_features
points = gs.Variable(
name="points",
dtype=np.float32,
shape=("batch", cfg.inference.max_points_num, lidar_point_features)
)
num_points = gs.Variable(name="num_points", dtype=np.int32, shape=("batch",))
voxels = gs.Variable(
name="voxels", dtype=np.float32,
shape=("batch", MAX_VOXELS, MAX_POINTS, NUM_FEATS)
)
voxel_coords = gs.Variable(name="voxel_coords", dtype=np.int32, shape=("batch", MAX_VOXELS, 4))
num_pillar = gs.Variable(name="num_pillar", dtype=np.int32, shape=("batch",))
pfp_attrs = dict()
pfp_attrs["max_voxels"] = MAX_VOXELS
pfp_attrs["max_num_points_per_voxel"] = MAX_POINTS
pfp_attrs["voxel_feature_num"] = NUM_FEATS
pfp_attrs["point_cloud_range"] = cfg.dataset.point_cloud_range
pfp_attrs["voxel_size"] = cfg.dataset.data_processor[2].voxel_size
VoxelGenerator_plugin = gs.Node(
op="VoxelGeneratorPlugin",
name="VoxelGeneratorPlugin_0",
inputs=[points, num_points],
outputs=[voxels, voxel_coords, num_pillar],
attrs=pfp_attrs
)
first_add.inputs[0] = VoxelGenerator_plugin.outputs[0]
scatter_node.inputs = [
scatter_node.inputs[0],
VoxelGenerator_plugin.outputs[1],
VoxelGenerator_plugin.outputs[2]
]
graph.nodes.append(VoxelGenerator_plugin)
graph.inputs = [points, num_points]
graph.cleanup().toposort()
# Append postprocessing node
num_boxes = gs.Variable(name="num_boxes", dtype=np.int32, shape=("batch",))
decodebbox_attrs = dict()
decodebbox_attrs["point_cloud_range"] = cfg.dataset.point_cloud_range
decodebbox_attrs["num_dir_bins"] = cfg.model.dense_head.num_dir_bins
decodebbox_attrs["dir_offset"] = cfg.model.dense_head.dir_offset
decodebbox_attrs["dir_limit_offset"] = cfg.model.dense_head.dir_limit_offset
decodebbox_attrs["score_thresh"] = cfg.model.post_processing.score_thresh
decodebbox_attrs["anchor_bottom_height"] = []
decodebbox_attrs["anchors"] = []
for anchor in cfg.model.dense_head.anchor_generator_config:
decodebbox_attrs["anchor_bottom_height"].extend(
anchor["anchor_bottom_heights"]
)
for anc_size in anchor["anchor_sizes"]:
for anc_rot in anchor["anchor_rotations"]:
_anc_size = anc_size.copy()
_anc_size.append(anc_rot)
decodebbox_attrs["anchors"].extend(
_anc_size
)
num_classes = len(decodebbox_attrs["anchor_bottom_height"])
nms_2d_size = graph.outputs[0].shape[1] * graph.outputs[0].shape[2]
output_boxes = gs.Variable(
name="output_boxes",
dtype=np.float32,
shape=("batch", nms_2d_size * num_classes * 2, 9)
)
DecodeBbox_plugin = gs.Node(
op="DecodeBbox3DPlugin",
name="DecodeBbox3DPlugin_0",
inputs=graph.outputs,
outputs=[output_boxes, num_boxes],
attrs=decodebbox_attrs
)
graph.nodes.append(DecodeBbox_plugin)
graph.outputs = DecodeBbox_plugin.outputs
graph.cleanup().toposort()
# Recursively set shape[0] = "batch"
recursive_set_shape(scatter_node)
return gs.export_onnx(graph)
if __name__ == '__main__':
model_file = "pointpillars-native-sim.onnx"
# Note: simplify_onnx() also expects the experiment config (cfg) as its
# second argument; supply it when invoking this module directly.
simplify_onnx(onnx.load(model_file))
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/export/simplifier_onnx.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars INT8 calibration APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
from io import open # Python 2/3 compatibility. pylint: disable=W0622
import logging
import os
import sys
import traceback
import numpy as np
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.export.decorators import override, subclass
"""Logger for data export APIs."""
logger = logging.getLogger(__name__)
try:
import pycuda.autoinit # noqa pylint: disable=W0611
import pycuda.driver as cuda
import tensorrt as trt
except ImportError:
# TODO(xiangbok): we should probably do this test in modulus/export/__init__.py.
logger.warning(
"Failed to import TRT and/or CUDA. TensorRT optimization "
"and inference will not be available."
)
DEFAULT_MAX_WORKSPACE_SIZE = 1 << 30
DEFAULT_MAX_BATCH_SIZE = 100
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 100
# Array of TensorRT loggers. We need to keep global references to
# the TensorRT loggers that we create to prevent them from being
# garbage collected as those are referenced from C++ code without
# Python knowing about it.
tensorrt_loggers = []
# If we were unable to load TensorRT packages because TensorRT is not installed
# then we will stub the exported API.
if "trt" not in globals():
keras_to_tensorrt = None
load_tensorrt_engine = None
else:
# We were able to load TensorRT package so we are implementing the API
def _create_tensorrt_logger(verbose=False):
"""Create a TensorRT logger.
Args:
verbose (bool): whether to make the logger verbose.
"""
if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':
# Do not print any warnings in TLT docker
trt_verbosity = trt.Logger.Severity.ERROR
elif verbose:
trt_verbosity = trt.Logger.Severity.INFO
else:
trt_verbosity = trt.Logger.Severity.WARNING
tensorrt_logger = trt.Logger(trt_verbosity)
tensorrt_loggers.append(tensorrt_logger)
return tensorrt_logger
class Calibrator(trt.IInt8EntropyCalibrator2):
"""Calibrator class.
This inherits from ``trt.IInt8EntropyCalibrator2`` to implement
the calibration interface that TensorRT needs to calibrate the
INT8 quantization factors.
Args:
data_dir (str): Directory path of LIDAR files.
cache_filename (str): Name of the calibration cache file to write to.
n_batches (int): Number of batches to calibrate over.
batch_size (int): Batch size to use for calibration (this must be
smaller or equal to the batch size of the provided data).
"""
def __init__(
self, data_dir,
cache_filename,
n_batches,
batch_size,
max_points_num,
*args, **kwargs
):
"""Init routine."""
super(Calibrator, self).__init__(*args, **kwargs)
self._data_dir = data_dir
self._cache_filename = cache_filename
self._batch_size = batch_size
self._n_batches = n_batches
self._max_points_num = max_points_num
self._batch_count = 0
self._data_mem_points = None
self._data_mem_num_points = None
self._lidar_files = glob.glob(data_dir + "/*.bin")
if len(self._lidar_files) < batch_size * n_batches:
raise OSError(
f"No enough data files, got {len(self._lidar_files)}, "
f"requested {batch_size * n_batches}"
)
def get_algorithm(self):
"""Get algorithm."""
return trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
print("Get batch: ", self._batch_count)
if self._batch_count < self._n_batches:
batch_files = self._lidar_files[
self._batch_count * self._batch_size: (self._batch_count + 1) * self._batch_size
]
points = []
num_points = []
for f in batch_files:
_points = np.fromfile(f, dtype=np.float32).reshape(-1, 4)
num_points.append(_points.shape[0])
points.append(
np.pad(
_points,
((0, self._max_points_num - _points.shape[0]), (0, 0))
)
)
points = np.stack(points, axis=0)
num_points = np.stack(num_points, axis=0)
if self._data_mem_points is None:
self._data_mem_points = cuda.mem_alloc(points.size * 4)
if self._data_mem_num_points is None:
self._data_mem_num_points = cuda.mem_alloc(num_points.size * 4)
cuda.memcpy_htod(
self._data_mem_points, np.ascontiguousarray(points, dtype=np.float32)
)
cuda.memcpy_htod(
self._data_mem_num_points, np.ascontiguousarray(num_points, dtype=np.int32)
)
self._batch_count += 1
return [int(self._data_mem_points), int(self._data_mem_num_points)]
if self._data_mem_points is not None:
self._data_mem_points.free()
if self._data_mem_num_points is not None:
self._data_mem_num_points.free()
return None
def get_batch_size(self):
"""Return batch size."""
return self._batch_size
def read_calibration_cache(self):
"""Read calibration from file."""
logger.debug("read_calibration_cache - no-op")
if os.path.isfile(self._cache_filename):
logger.warning(
"Calibration file %s exists but is being "
"ignored." % self._cache_filename
)
def write_calibration_cache(self, cache):
"""Write calibration to file.
Args:
cache (memoryview): buffer to read calibration data from.
"""
logger.info(
"Saving calibration cache (size %d) to %s",
len(cache),
self._cache_filename,
)
with open(self._cache_filename, "wb") as f:
f.write(cache)
def _set_excluded_layer_precision(network, fp32_layer_names, fp16_layer_names):
"""When generating an INT8 model, it sets excluded layers' precision as fp32 or fp16.
In detail, this function is only used when generating INT8 TensorRT models. It accepts
two lists of layer names: (1). for the layers in fp32_layer_names, their precision will
be set as fp32; (2). for those in fp16_layer_names, their precision will be set as fp16.
Args:
network: TensorRT network object.
fp32_layer_names (list): List of layer names. These layers use fp32.
fp16_layer_names (list): List of layer names. These layers use fp16.
"""
is_mixed_precision = False
use_fp16_mode = False
for i, layer in enumerate(network):
if any(s in layer.name for s in fp32_layer_names):
is_mixed_precision = True
layer.precision = trt.float32
layer.set_output_type(0, trt.float32)
logger.info("fp32 index: %d; name: %s", i, layer.name)
elif any(s in layer.name for s in fp16_layer_names):
is_mixed_precision = True
use_fp16_mode = True
layer.precision = trt.float16
layer.set_output_type(0, trt.float16)
logger.info("fp16 index: %d; name: %s", i, layer.name)
else:
# To ensure int8 optimization is not done for shape layer
if (not layer.get_output(0).is_shape_tensor):
layer.precision = trt.int8
layer.set_output_type(0, trt.int8)
return is_mixed_precision, use_fp16_mode
class EngineBuilder(object):
"""Create a TensorRT engine.
Args:
filenames (list): List of filenames to load the model from.
max_batch_size (int): Maximum batch size.
max_workspace_size (int): Maximum workspace size.
dtype (str): data type ('fp32', 'fp16' or 'int8').
calibrator (:any:`Calibrator`): Calibrator to use for INT8 optimization.
fp32_layer_names (list): List of layer names. These layers use fp32.
fp16_layer_names (list): List of layer names. These layers use fp16.
verbose (bool): Whether to turn on verbose mode.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
strict_type(bool): Whether or not to apply strict_type_constraints for INT8 mode.
"""
def __init__(
self,
filenames,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,
dtype="fp32",
calibrator=None,
fp32_layer_names=None,
fp16_layer_names=None,
verbose=False,
tensor_scale_dict=None,
strict_type=False,
):
"""Initialization routine."""
if dtype == "int8":
self._dtype = trt.DataType.INT8
elif dtype == "fp16":
self._dtype = trt.DataType.HALF
elif dtype == "fp32":
self._dtype = trt.DataType.FLOAT
else:
raise ValueError("Unsupported data type: %s" % dtype)
self._strict_type = strict_type
if fp32_layer_names is None:
fp32_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP32 layer precision could be set only when dtype is INT8"
)
if fp16_layer_names is None:
fp16_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP16 layer precision could be set only when dtype is INT8"
)
self._fp32_layer_names = fp32_layer_names
self._fp16_layer_names = fp16_layer_names
self._tensorrt_logger = _create_tensorrt_logger(verbose)
builder = trt.Builder(self._tensorrt_logger)
trt.init_libnvinfer_plugins(self._tensorrt_logger, "")
if self._dtype == trt.DataType.HALF and not builder.platform_has_fast_fp16:
logger.error("Specified FP16 but not supported on platform.")
raise AttributeError("Specified FP16 but not supported on platform.")
if self._dtype == trt.DataType.INT8 and not builder.platform_has_fast_int8:
logger.error("Specified INT8 but not supported on platform.")
raise AttributeError("Specified INT8 but not supported on platform.")
if self._dtype == trt.DataType.INT8:
if tensor_scale_dict is None and calibrator is None:
logger.error("Specified INT8 but neither calibrator "
"nor tensor_scale_dict is provided.")
raise AttributeError("Specified INT8 but no calibrator "
"or tensor_scale_dict is provided.")
network = builder.create_network()
self._load_from_files(filenames, network)
builder.max_batch_size = max_batch_size
builder.max_workspace_size = max_workspace_size
if self._dtype == trt.DataType.HALF:
builder.fp16_mode = True
if self._dtype == trt.DataType.INT8:
builder.int8_mode = True
if tensor_scale_dict is None:
builder.int8_calibrator = calibrator
# When use mixed precision, for TensorRT builder:
# strict_type_constraints needs to be True;
# fp16_mode needs to be True if any layer uses fp16 precision.
builder.strict_type_constraints, builder.fp16_mode = \
_set_excluded_layer_precision(
network=network,
fp32_layer_names=self._fp32_layer_names,
fp16_layer_names=self._fp16_layer_names,
)
else:
# Discrete Volta GPUs don't have int8 tensor cores. So TensorRT might
# not pick int8 implementation over fp16 or even fp32 for V100
# GPUs found on data centers (e.g., AVDC). This will be a discrepancy
# compared to Turing GPUs including d-GPU of DDPX and also Xavier i-GPU
# both of which have int8 accelerators. We set the builder to strict
# mode to avoid picking higher precision implementation even if they are
# faster.
if self._strict_type:
builder.strict_type_constraints = True
else:
builder.fp16_mode = True
self._set_tensor_dynamic_ranges(
network=network, tensor_scale_dict=tensor_scale_dict
)
engine = builder.build_cuda_engine(network)
try:
assert engine
except AssertionError:
logger.error("Failed to create engine")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"Parsing failed on line {} in statement {}".format(line, text)
)
self._engine = engine
def _load_from_files(self, filenames, network):
"""Load an engine from files."""
raise NotImplementedError()
@staticmethod
def _set_tensor_dynamic_ranges(network, tensor_scale_dict):
"""Set the scaling factors obtained from quantization-aware training.
Args:
network: TensorRT network object.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
"""
tensors_found = []
for idx in range(network.num_inputs):
input_tensor = network.get_input(idx)
if input_tensor.name in tensor_scale_dict:
tensors_found.append(input_tensor.name)
cal_scale = tensor_scale_dict[input_tensor.name]
input_tensor.dynamic_range = (-cal_scale, cal_scale)
for layer in network:
found_all_outputs = True
for idx in range(layer.num_outputs):
output_tensor = layer.get_output(idx)
if output_tensor.name in tensor_scale_dict:
tensors_found.append(output_tensor.name)
cal_scale = tensor_scale_dict[output_tensor.name]
output_tensor.dynamic_range = (-cal_scale, cal_scale)
else:
found_all_outputs = False
if found_all_outputs:
layer.precision = trt.int8
tensors_in_dict = tensor_scale_dict.keys()
if set(tensors_in_dict) != set(tensors_found):
print("Tensors in scale dictionary but not in network:",
set(tensors_in_dict) - set(tensors_found))
def get_engine(self):
"""Return the engine that was built by the instance."""
return self._engine
@subclass
class ONNXEngineBuilder(EngineBuilder):
"""Create a TensorRT engine from an ONNX file.
Args:
filenames (str): ONNX file to create the engine from.
dtype (str): Data type ('fp32', 'fp16' or 'int8').
calibrator (:any:`Calibrator`): Calibrator to use for INT8 optimization.
dynamic_batch (bool): Whether to build the engine with a dynamic batch dimension.
"""
@override
def __init__(
self,
filenames,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
min_batch_size=DEFAULT_MIN_BATCH_SIZE,
max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,
opt_batch_size=DEFAULT_OPT_BATCH_SIZE,
dtype="fp32",
calibrator=None,
fp32_layer_names=None,
fp16_layer_names=None,
verbose=False,
tensor_scale_dict=None,
dynamic_batch=False,
strict_type=False,
input_dims=None,
):
"""Initialization routine."""
if dtype == "int8":
self._dtype = trt.DataType.INT8
elif dtype == "fp16":
self._dtype = trt.DataType.HALF
elif dtype == "fp32":
self._dtype = trt.DataType.FLOAT
else:
raise ValueError("Unsupported data type: %s" % dtype)
if fp32_layer_names is None:
fp32_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP32 layer precision could be set only when dtype is INT8"
)
if fp16_layer_names is None:
fp16_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP16 layer precision could be set only when dtype is INT8"
)
self._fp32_layer_names = fp32_layer_names
self._fp16_layer_names = fp16_layer_names
self._strict_type = strict_type
self._tensorrt_logger = _create_tensorrt_logger(verbose)
builder = trt.Builder(self._tensorrt_logger)
if self._dtype == trt.DataType.HALF and not builder.platform_has_fast_fp16:
logger.error("Specified FP16 but not supported on platform.")
raise AttributeError("Specified FP16 but not supported on platform.")
if self._dtype == trt.DataType.INT8 and not builder.platform_has_fast_int8:
logger.error("Specified INT8 but not supported on platform.")
raise AttributeError("Specified INT8 but not supported on platform.")
if self._dtype == trt.DataType.INT8:
if tensor_scale_dict is None and calibrator is None:
logger.error("Specified INT8 but neither calibrator "
"nor tensor_scale_dict is provided.")
raise AttributeError("Specified INT8 but no calibrator "
"or tensor_scale_dict is provided.")
network = builder.create_network(
1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self._load_from_files([filenames], network)
config = builder.create_builder_config()
if dynamic_batch:
opt_profile = builder.create_optimization_profile()
# input: points
model_input = network.get_input(0)
input_name = model_input.name
input_shape = model_input.shape
real_shape_min = (min_batch_size, input_shape[1], input_shape[2])
real_shape_opt = (opt_batch_size, input_shape[1], input_shape[2])
real_shape_max = (max_batch_size, input_shape[1], input_shape[2])
opt_profile.set_shape(input=input_name,
min=real_shape_min,
opt=real_shape_opt,
max=real_shape_max)
# input: num_points
model_input = network.get_input(1)
input_name = model_input.name
real_shape_min = (min_batch_size,)
real_shape_opt = (opt_batch_size,)
real_shape_max = (max_batch_size,)
opt_profile.set_shape(input=input_name,
min=real_shape_min,
opt=real_shape_opt,
max=real_shape_max)
config.add_optimization_profile(opt_profile)
config.max_workspace_size = max_workspace_size
if self._dtype == trt.DataType.HALF:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
if self._dtype == trt.DataType.INT8:
config.flags |= 1 << int(trt.BuilderFlag.INT8)
if tensor_scale_dict is None:
config.int8_calibrator = calibrator
# When use mixed precision, for TensorRT builder:
# strict_type_constraints needs to be True;
# fp16_mode needs to be True if any layer uses fp16 precision.
strict_type_constraints, fp16_mode = \
_set_excluded_layer_precision(
network=network,
fp32_layer_names=self._fp32_layer_names,
fp16_layer_names=self._fp16_layer_names,
)
if strict_type_constraints:
config.flags |= 1 << int(trt.BuilderFlag.STRICT_TYPES)
if fp16_mode:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
else:
# Discrete Volta GPUs don't have int8 tensor cores. So TensorRT might
# not pick int8 implementation over fp16 or even fp32 for V100
# GPUs found on data centers (e.g., AVDC). This will be a discrepancy
# compared to Turing GPUs including d-GPU of DDPX and also Xavier i-GPU
# both of which have int8 accelerators. We set the builder to strict
# mode to avoid picking higher precision implementation even if they are
# faster.
if self._strict_type:
config.flags |= 1 << int(trt.BuilderFlag.STRICT_TYPES)
else:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
self._set_tensor_dynamic_ranges(
network=network, tensor_scale_dict=tensor_scale_dict
)
engine = builder.build_engine(network, config)
try:
assert engine
except AssertionError:
logger.error("Failed to create engine")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"Parsing failed on line {} in statement {}".format(line, text)
)
self._engine = engine
@override
def _load_from_files(self, filenames, network):
filename = filenames[0]
parser = trt.OnnxParser(network, self._tensorrt_logger)
with open(filename, "rb") as model_file:
ret = parser.parse(model_file.read())
for index in range(parser.num_errors):
print(parser.get_error(index))
assert ret, 'ONNX parser failed to parse the model.'
# Note: there might be an issue when running inference on TRT:
# [TensorRT] ERROR: Network must have at least one output.
# See https://github.com/NVIDIA/TensorRT/issues/183.
# Just keep a note in case we have this issue again.
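# Illustrative INT8 build (hypothetical paths and sizes, not part of the
# original file):
#
#     calibrator = Calibrator(
#         data_dir="/path/to/lidar_bins",
#         cache_filename="calibration.cache",
#         n_batches=10,
#         batch_size=4,
#         max_points_num=204800,
#     )
#     builder = ONNXEngineBuilder(
#         "pointpillars.onnx",
#         dtype="int8",
#         calibrator=calibrator,
#         dynamic_batch=True,
#     )
#     engine = builder.get_engine()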
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/export/tensorrt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import logging
import h5py
import numpy as np
"""Logger for data export APIs."""
logger = logging.getLogger(__name__)
class TensorFile(io.RawIOBase):
"""Class to read/write multiple tensors to a file.
The underlying implementation using an HDF5 database
to store data.
Note: this class does not support multiple writers to
the same file.
Args:
filename (str): path to file.
mode (str): mode to open file in.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
enforce_same_shape (bool): whether to enforce that all tensors be the same shape.
"""
DEFAULT_ARRAY_KEY = "_tensorfile_array_key_"
GROUP_NAME_PREFIX = "_tensorfile_array_key_"
def __init__(
self, filename, mode="a", enforce_same_shape=True, *args, **kwargs
): # pylint: disable=W1113
"""Init routine."""
super(TensorFile, self).__init__(*args, **kwargs)
logger.debug("Opening %s with mode=%s", filename, mode)
self._enforce_same_shape = enforce_same_shape
self._mode = mode
# Open or create the HDF5 file.
self._db = h5py.File(filename, mode)
if "count" not in self._db.attrs:
self._db.attrs["count"] = 0
if "r" in mode:
self._cursor = 0
else:
self._cursor = self._db.attrs["count"]
def _get_group_name(cls, cursor):
"""Return the name of the H5 dataset to create, given a cursor index."""
return "%s_%d" % (cls.GROUP_NAME_PREFIX, cursor)
def _write_data(self, group, data):
for key, value in data.items():
if isinstance(value, dict):
self._write_data(group.create_group(key), value)
elif isinstance(value, np.ndarray):
if self._enforce_same_shape:
if "shape" not in self._db.attrs:
self._db.attrs["shape"] = value.shape
else:
expected_shape = tuple(self._db.attrs["shape"].tolist())
if expected_shape != value.shape:
raise ValueError(
"Shape mismatch: %s v.s. %s"
% (str(expected_shape), str(value.shape))
)
group.create_dataset(key, data=value, compression="gzip")
else:
raise ValueError(
"Only np.ndarray or dicts can be written into a TensorFile."
)
def close(self):
"""Close this file."""
self._db.close()
# For python2.
def next(self):
"""Return next element."""
return self.__next__()
# For python3.
def __next__(self):
"""Return next element."""
if self._cursor < self._db.attrs["count"]:
return self.read()
raise StopIteration()
def _read_data(self, group):
if isinstance(group, h5py.Group):
data = {key: self._read_data(value) for key, value in group.items()}
else:
data = group[()]
return data
def read(self):
"""Read from current cursor.
Return array assigned to current cursor, or ``None`` to indicate
the end of the file.
"""
if not self.readable():
raise IOError("Instance is not readable.")
group_name = self._get_group_name(self._cursor)
if group_name in self._db:
self._cursor += 1
group = self._db[group_name]
data = self._read_data(group)
if list(data.keys()) == [self.DEFAULT_ARRAY_KEY]:
# The only key in this group is the default key.
# Return the numpy array directly.
return data[self.DEFAULT_ARRAY_KEY]
return data
return None
def readable(self):
"""Return whether this instance is readable."""
return self._mode in ["r", "r+", "a"]
def seekable(self):
"""Return whether this instance is seekable."""
return True
def seek(self, n):
"""Move cursor."""
self._cursor = min(n, self._db.attrs["count"])
return self._cursor
def tell(self):
"""Return current cursor index."""
return self._cursor
def truncate(self, n):
"""Truncation is not supported."""
raise IOError("Truncate operation is not supported.")
def writable(self):
"""Return whether this instance is writable."""
return self._mode in ["r+", "w", "w-", "a"]
def write(self, data):
"""Write a Numpy array or a dictionary of numpy arrays into file."""
if not self.writable():
raise IOError("Instance is not writable.")
if isinstance(data, np.ndarray):
data = {self.DEFAULT_ARRAY_KEY: data}
group_name = self._get_group_name(self._cursor)
# Delete existing instance of datasets at this cursor position.
if group_name in self._db:
del self._db[group_name]
group = self._db.create_group(group_name)
self._write_data(group, data)
self._cursor += 1
if self._cursor > self._db.attrs["count"]:
self._db.attrs["count"] = self._cursor
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/export/data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
def override(method):
"""
Override decorator.
Decorator implementing method overriding in Python.
Must be used together with the @subclass class decorator.
"""
method.override = True
return method
def subclass(class_object):
"""
Subclass decorator.
Verify all @override methods
Use a class decorator to find the method's class
"""
for name, method in class_object.__dict__.items():
if hasattr(method, "override"):
found = False
for base_class in inspect.getmro(class_object)[1:]:
if name in base_class.__dict__:
if not method.__doc__:
# copy docstring
method.__doc__ = base_class.__dict__[name].__doc__
found = True
break
assert found, '"%s.%s" not found in any base class' % (
class_object.__name__,
name,
)
return class_object
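# Illustrative example (hypothetical classes, not part of the original file):
# ``@subclass`` verifies that every method marked ``@override`` exists in a
# base class, copying the base docstring when the override has none.
if __name__ == "__main__":
    class _Base(object):
        """Hypothetical base class."""

        def run(self):
            """Return a constant."""
            return 0

    @subclass
    class _Derived(_Base):
        """Hypothetical derived class overriding run()."""

        @override
        def run(self):
            return 1

    assert _Derived().run() == 1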
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/export/decorators.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config template and utils for PointPillars."""
from pathlib import Path
import yaml
from easydict import EasyDict
def log_config_to_file(cfg, pre='cfg', logger=None):
"""Log config to file."""
for key, val in cfg.items():
if isinstance(cfg[key], EasyDict):
logger.info('\n%s.%s = edict()' % (pre, key))
log_config_to_file(cfg[key], pre=pre + '.' + key, logger=logger)
continue
logger.info('%s.%s: %s' % (pre, key, val))
def cfg_from_list(cfg_list, config):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = config
for subkey in key_list[:-1]:
assert subkey in d, 'NotFoundKey: %s' % subkey
d = d[subkey]
subkey = key_list[-1]
assert subkey in d, 'NotFoundKey: %s' % subkey
try:
value = literal_eval(v)
except: # noqa: E722
value = v
if (not isinstance(value, type(d[subkey]))) and isinstance(d[subkey], EasyDict):
key_val_list = value.split(',')
for src in key_val_list:
cur_key, cur_val = src.split(':')
val_type = type(d[subkey][cur_key])
cur_val = val_type(cur_val)
d[subkey][cur_key] = cur_val
elif isinstance(value, type(d[subkey])) and isinstance(d[subkey], list):
val_list = value.split(',')
for k, x in enumerate(val_list):
val_list[k] = type(d[subkey][0])(x)
d[subkey] = val_list
else:
assert isinstance(value, type(d[subkey])), \
'type {} does not match original type {}'.format(type(value), type(d[subkey]))
d[subkey] = value
def merge_new_config(config, new_config):
"""Merge new config."""
if '_BASE_CONFIG_' in new_config:
with open(new_config['_BASE_CONFIG_'], 'r') as f:
try:
yaml_config = yaml.load(f, Loader=yaml.FullLoader) # nosec
except: # noqa: E722
yaml_config = yaml.safe_load(f)
config.update(EasyDict(yaml_config))
for key, val in new_config.items():
if not isinstance(val, dict):
config[key] = val
continue
if key not in config:
config[key] = EasyDict()
merge_new_config(config[key], val)
return config
def cfg_from_yaml_file(cfg_file, config):
"""Parse config from yaml file."""
with open(cfg_file, 'r') as f:
new_config = yaml.load(f, Loader=yaml.FullLoader) # nosec
merge_new_config(config=config, new_config=new_config)
# Set defaults for optional parameters
if not hasattr(config.train, "resume_training_checkpoint_path"):
config.train.resume_training_checkpoint_path = None
if not hasattr(config.model, "pretrained_model_path"):
config.model.pretrained_model_path = None
if not hasattr(config.train, "pruned_model_path"):
config.train.pruned_model_path = None
if not hasattr(config.train, "random_seed"):
config.train.random_seed = None
if not hasattr(config, "results_dir"):
config.results_dir = None
if not hasattr(config, "class_names"):
config.class_names = config.dataset.class_names
if not hasattr(config, "export"):
config.export = EasyDict()
if not hasattr(config.export, "onnx_file"):
config.export.onnx_file = None
if not hasattr(config.export, "checkpoint"):
config.export.checkpoint = None
if not hasattr(config.export, "gpu_id"):
config.export.gpu_id = None
if not hasattr(config, "prune"):
config.prune = EasyDict()
if not hasattr(config.prune, "model"):
config.prune.model = None
return config
cfg = EasyDict()
cfg.ROOT_DIR = (Path(__file__).resolve().parent / '../').resolve()
cfg.LOCAL_RANK = 0
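# A minimal usage sketch (the YAML path is illustrative):
#   from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.config import cfg, cfg_from_yaml_file
#   cfg_from_yaml_file('/workspace/specs/pointpillars.yaml', cfg)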
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version of PCDet module."""
__version__ = "0.3.0+nv"
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PCDet root module."""
import subprocess
from pathlib import Path
from .version import __version__
__all__ = [
'__version__'
]
def get_git_commit_number():
"""Get git commit number."""
if not (Path(__file__).parent / '../.git').exists():
return '0000000'
cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
git_commit_number = cmd_out.stdout.decode('utf-8')[:7]
return git_commit_number
script_version = get_git_commit_number()
if script_version not in __version__:
__version__ = __version__ + '+py%s' % script_version
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset module for PointPillars."""
import torch
from torch.utils.data import DataLoader
from torch.utils.data import DistributedSampler as _DistributedSampler
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from .dataset import DatasetTemplate
from .kitti.kitti_dataset import KittiDataset
from .general.pc_dataset import GeneralPCDataset
__all__ = {
'DatasetTemplate': DatasetTemplate,
'KittiDataset': KittiDataset,
'GeneralPCDataset': GeneralPCDataset
}
class DistributedSampler(_DistributedSampler):
"""Distributed sampler."""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
"""Initialize."""
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
def __iter__(self):
"""Iter."""
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
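        # Pad the index list so it splits evenly across ranks, then take this rank's strided slice.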
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4,
logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0):
"""Build data loader."""
dataset = __all__[dataset_cfg.type](
dataset_cfg=dataset_cfg,
class_names=class_names,
root_path=root_path,
training=training,
logger=logger,
)
if merge_all_iters_to_one_epoch:
assert hasattr(dataset, 'merge_all_iters_to_one_epoch')
dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
if dist:
if training:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
rank, world_size = common_utils.get_dist_info()
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
else:
sampler = None
dataloader = DataLoader(
dataset, batch_size=batch_size, pin_memory=True, num_workers=workers,
shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch,
drop_last=False, sampler=sampler, timeout=0
)
return dataset, dataloader, sampler
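# A minimal usage sketch (assumes `dataset_cfg` is the loaded dataset EasyDict and `logger` is a Python logger):
#   dataset, loader, sampler = build_dataloader(
#       dataset_cfg=dataset_cfg, class_names=['Car', 'Pedestrian', 'Cyclist'],
#       batch_size=4, dist=False, workers=2, logger=logger, training=True)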
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset Template class."""
from collections import defaultdict
from pathlib import Path
import numpy as np
import torch.utils.data as torch_data
from ..utils import common_utils
from .augmentor.data_augmentor import DataAugmentor
from .processor.data_processor import DataProcessor
from .processor.point_feature_encoder import PointFeatureEncoder
from nvidia_tao_pytorch.core.path_utils import expand_path
class DatasetTemplate(torch_data.Dataset):
"""Dataset Template class."""
def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None):
"""Initialize."""
super().__init__()
self.dataset_cfg = dataset_cfg
self.training = training
self.class_names = class_names
self.logger = logger
self.root_path = root_path if root_path is not None else Path(expand_path(self.dataset_cfg.data_path))
self.logger = logger
if self.dataset_cfg is None or class_names is None:
return
self.point_cloud_range = np.array(self.dataset_cfg.point_cloud_range, dtype=np.float32)
self.point_feature_encoder = PointFeatureEncoder(
self.dataset_cfg.point_feature_encoding,
point_cloud_range=self.point_cloud_range
)
self.data_augmentor = DataAugmentor(
self.root_path, self.dataset_cfg.data_augmentor, self.class_names, logger=self.logger
) if self.training else None
self.data_processor = DataProcessor(
self.dataset_cfg.data_processor,
point_cloud_range=self.point_cloud_range,
training=self.training,
num_point_features=self.dataset_cfg.data_augmentor.aug_config_list[0].num_point_features
)
self.grid_size = self.data_processor.grid_size
self.voxel_size = self.data_processor.voxel_size
self.total_epochs = 0
self._merge_all_iters_to_one_epoch = False
@property
def mode(self):
"""mode of training."""
return 'train' if self.training else 'test'
def __getstate__(self):
"""Get state."""
d = dict(self.__dict__)
if "logger" in d:
del d['logger']
return d
def __setstate__(self, d):
"""Set state."""
self.__dict__.update(d)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
To support a custom dataset, implement this function to receive the predicted results from the model, and then
transform the unified normative coordinate to your required coordinate, and optionally save them to disk.
Args:
batch_dict: dict of original data from the dataloader
pred_dicts: dict of predicted results from the model
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path: if it is not None, save the results to this path
Returns:
"""
def merge_all_iters_to_one_epoch(self, merge=True, epochs=None):
"""Merge all iterations to one epoch."""
if merge:
self._merge_all_iters_to_one_epoch = True
self.total_epochs = epochs
else:
self._merge_all_iters_to_one_epoch = False
def __len__(self):
"""Length of dataset."""
raise NotImplementedError
def __getitem__(self, index):
"""
To support a custom dataset, implement this function to load the raw data (and labels), then transform them to
the unified normative coordinate and call the function self.prepare_data() to process the data and send them
to the model.
Args:
index:
Returns:
"""
raise NotImplementedError
def prepare_data(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
data_dict:
frame_id: string
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
use_lead_xyz: bool
voxels: optional (num_voxels, max_points_per_voxel, 3 + C)
voxel_coords: optional (num_voxels, 3)
voxel_num_points: optional (num_voxels)
...
"""
if self.training:
assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training'
gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_)
data_dict = self.data_augmentor.forward(
data_dict={
**data_dict,
'gt_boxes_mask': gt_boxes_mask
}
)
if len(data_dict['gt_boxes']) == 0:
new_index = np.random.randint(self.__len__())
return self.__getitem__(new_index)
if data_dict.get('gt_boxes', None) is not None:
selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names)
data_dict['gt_boxes'] = data_dict['gt_boxes'][selected]
data_dict['gt_names'] = data_dict['gt_names'][selected]
gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32)
gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1)
data_dict['gt_boxes'] = gt_boxes
data_dict = self.point_feature_encoder.forward(data_dict)
data_dict = self.data_processor.forward(
data_dict=data_dict
)
data_dict.pop('gt_names', None)
return data_dict
@staticmethod
def collate_batch(batch_list, _unused=False):
"""Batch samples."""
data_dict = defaultdict(list)
for cur_sample in batch_list:
for key, val in cur_sample.items():
data_dict[key].append(val)
batch_size = len(batch_list)
ret = {}
for key, val in data_dict.items():
try:
if key in ['voxels', 'voxel_num_points']:
ret[key] = np.concatenate(val, axis=0)
elif key in ['points', 'voxel_coords']:
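                    # Prepend the sample index as an extra leading column so points/voxel
                    # coordinates from different samples can be concatenated into one array.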
coors = []
for i, coor in enumerate(val):
coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i)
coors.append(coor_pad)
ret[key] = np.concatenate(coors, axis=0)
elif key in ['gt_boxes']:
max_gt = max([len(x) for x in val])
batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32)
for k in range(batch_size):
batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k]
ret[key] = batch_gt_boxes3d
else:
ret[key] = np.stack(val, axis=0)
except: # noqa: E722
print('Error in collate_batch: key=%s' % key)
raise TypeError
ret['batch_size'] = batch_size
return ret
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentations for PointPillars."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/augmentor/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Augmentor."""
from functools import partial
import numpy as np
from ...utils import common_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
"""Data Augmentor class."""
def __init__(self, root_path, augmentor_configs, class_names, logger=None):
"""Initialize."""
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.data_augmentor_queue = []
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.aug_config_list
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.name in augmentor_configs.disable_aug_list:
continue
# cur_augmentor = getattr(self, cur_cfg.name)(config=cur_cfg)
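            # Dispatch by name: only config entries that match a DataAugmentor method are instantiated.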
func_map = {key: getattr(DataAugmentor, key) for key in vars(DataAugmentor) if not key.startswith("__")}
if cur_cfg.name in func_map:
cur_augmentor = func_map[cur_cfg.name](self, config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def gt_sampling(self, config=None):
"""Groundtruth sampling."""
db_sampler = database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def __getstate__(self):
"""Get state."""
d = dict(self.__dict__)
if "logger" in d:
del d['logger']
return d
def __setstate__(self, d):
"""Set state."""
self.__dict__.update(d)
def random_world_flip(self, data_dict=None, config=None):
"""Random world flip."""
if data_dict is None:
return partial(self.random_world_flip, config=config)
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
for cur_axis in config['along_axis_list']:
assert cur_axis in ['x', 'y']
gt_boxes, points = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
gt_boxes, points,
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_rotation(self, data_dict=None, config=None):
"""Random world rotation."""
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['world_rot_angle']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
gt_boxes, points = augmentor_utils.global_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
"""Random world scaling."""
if data_dict is None:
return partial(self.random_world_scaling, config=config)
gt_boxes, points = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['world_scale_range']
)
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
gt_names: optional, (N), string
...
Returns:
"""
for cur_augmentor in self.data_augmentor_queue:
data_dict = cur_augmentor(data_dict=data_dict)
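        # Normalize heading angles to [-pi, pi) after all augmentations have been applied.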
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'calib' in data_dict:
data_dict.pop('calib')
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/augmentor/data_augmentor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database sampler."""
import pickle
import numpy as np
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils import box_utils
class DataBaseSampler(object):
"""Data base sampler."""
def __init__(self, root_path, sampler_cfg, class_names, logger=None):
"""Initialize."""
self.root_path = root_path
self.class_names = class_names
self.sampler_cfg = sampler_cfg
self.logger = logger
self.db_infos = {}
for class_name in class_names:
self.db_infos[class_name] = []
for db_info_path in sampler_cfg.db_info_path:
db_info_path = self.root_path.resolve() / db_info_path
with open(str(db_info_path), 'rb') as f:
infos = pickle.load(f)
for cur_class in class_names:
self.db_infos[cur_class].extend(infos[cur_class])
for func_name, val in sampler_cfg.preface.items():
self.db_infos = getattr(self, func_name)(self.db_infos, val)
self.sample_groups = {}
self.sample_class_num = {}
self.limit_whole_scene = sampler_cfg.get('limit_whole_scene', False)
for x in sampler_cfg.sample_groups:
class_name, sample_num = x.split(':')
if class_name not in class_names:
continue
self.sample_class_num[class_name] = sample_num
self.sample_groups[class_name] = {
'sample_num': sample_num,
'pointer': len(self.db_infos[class_name]),
'indices': np.arange(len(self.db_infos[class_name]))
}
def __getstate__(self):
"""Get state."""
d = dict(self.__dict__)
if "logger" in d:
del d['logger']
return d
def __setstate__(self, d):
"""Set state."""
self.__dict__.update(d)
def filter_by_difficulty(self, db_infos, removed_difficulty):
"""Filter by difficulty."""
new_db_infos = {}
for key, dinfos in db_infos.items():
pre_len = len(dinfos)
new_db_infos[key] = [
info for info in dinfos
if info['difficulty'] not in removed_difficulty
]
if self.logger is not None:
self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))
return new_db_infos
def filter_by_min_points(self, db_infos, min_gt_points_list):
"""Filter by min points."""
for name_num in min_gt_points_list:
name, min_num = name_num.split(':')
min_num = int(min_num)
if min_num > 0 and name in db_infos.keys():
filtered_infos = []
for info in db_infos[name]:
if info['num_points_in_gt'] >= min_num:
filtered_infos.append(info)
if self.logger is not None:
self.logger.info('Database filter by min points %s: %d => %d' %
(name, len(db_infos[name]), len(filtered_infos)))
db_infos[name] = filtered_infos
return db_infos
def sample_with_fixed_number(self, class_name, sample_group):
"""
Args:
class_name:
sample_group:
Returns:
"""
sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']
if pointer >= len(self.db_infos[class_name]):
indices = np.random.permutation(len(self.db_infos[class_name]))
pointer = 0
sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]
pointer += sample_num
sample_group['pointer'] = pointer
sample_group['indices'] = indices
return sampled_dict
@staticmethod
def put_boxes_on_road_planes(gt_boxes, road_planes, calib):
"""
Only validate in KITTIDataset
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
road_planes: [a, b, c, d]
calib:
Returns:
"""
a, b, c, d = road_planes
center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])
cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
center_cam[:, 1] = cur_height_cam
cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]
mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height
gt_boxes[:, 2] -= mv_height # lidar view
return gt_boxes, mv_height
def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):
"""Add sampled boxes to scene."""
gt_boxes_mask = data_dict['gt_boxes_mask']
gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
gt_names = data_dict['gt_names'][gt_boxes_mask]
points = data_dict['points']
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
)
data_dict.pop('calib')
data_dict.pop('road_plane')
obj_points_list = []
for idx, info in enumerate(total_valid_sampled_dict):
file_path = self.root_path / info['path']
obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(
[-1, self.sampler_cfg.num_point_features])
obj_points[:, :3] += info['box3d_lidar'][:3]
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
# mv height
obj_points[:, 2] -= mv_height[idx]
obj_points_list.append(obj_points)
obj_points = np.concatenate(obj_points_list, axis=0)
sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
large_sampled_gt_boxes = box_utils.enlarge_box3d(
sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.remove_extra_width
)
points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
points = np.concatenate([obj_points, points], axis=0)
gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
data_dict['gt_boxes'] = gt_boxes
data_dict['gt_names'] = gt_names
data_dict['points'] = points
return data_dict
def __call__(self, data_dict):
"""
Args:
data_dict:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
gt_boxes = data_dict['gt_boxes']
gt_names = data_dict['gt_names'].astype(str)
existed_boxes = gt_boxes
total_valid_sampled_dict = []
for class_name, sample_group in self.sample_groups.items():
if self.limit_whole_scene:
num_gt = np.sum(class_name == gt_names)
sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
if int(sample_group['sample_num']) > 0:
sampled_dict = self.sample_with_fixed_number(class_name, sample_group)
sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
if self.sampler_cfg.get('disable_with_fake_lidar', False):
sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
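                # Keep only sampled boxes with zero BEV overlap against existing boxes and against each other.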
iou1 = iou1 if iou1.shape[1] > 0 else iou2
valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]
valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
valid_sampled_boxes = sampled_boxes[valid_mask]
existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
total_valid_sampled_dict.extend(valid_sampled_dict)
sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
if total_valid_sampled_dict.__len__() > 0:
data_dict = self.add_sampled_boxes_to_scene(data_dict, sampled_gt_boxes, total_valid_sampled_dict)
data_dict.pop('gt_boxes_mask')
return data_dict
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/augmentor/database_sampler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for data augmentations."""
import numpy as np
from ...utils import common_utils
def random_flip_along_x(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 1] = -gt_boxes[:, 1]
gt_boxes[:, 6] = -gt_boxes[:, 6]
points[:, 1] = -points[:, 1]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 8] = -gt_boxes[:, 8]
return gt_boxes, points
def random_flip_along_y(gt_boxes, points):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C)
Returns:
"""
enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5])
if enable:
gt_boxes[:, 0] = -gt_boxes[:, 0]
gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi)
points[:, 0] = -points[:, 0]
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7] = -gt_boxes[:, 7]
return gt_boxes, points
def global_rotation(gt_boxes, points, rot_range):
"""
Args:
gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]]
points: (M, 3 + C),
rot_range: [min, max]
Returns:
"""
noise_rotation = np.random.uniform(rot_range[0], rot_range[1])
points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0]
gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0]
gt_boxes[:, 6] += noise_rotation
if gt_boxes.shape[1] > 7:
gt_boxes[:, 7:9] = common_utils.rotate_points_along_z(
np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :],
np.array([noise_rotation])
)[0][:, 0:2]
return gt_boxes, points
def global_scaling(gt_boxes, points, scale_range):
"""
Args:
gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading]
points: (M, 3 + C),
scale_range: [min, max]
Returns:
"""
if scale_range[1] - scale_range[0] < 1e-3:
return gt_boxes, points
noise_scale = np.random.uniform(scale_range[0], scale_range[1])
points[:, :3] *= noise_scale
gt_boxes[:, :6] *= noise_scale
return gt_boxes, points
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/augmentor/augmentor_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic PintPillars data loader."""
import copy
import os
import pickle
import numpy as np
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import object3d_general
from ..dataset import DatasetTemplate
class GeneralPCDataset(DatasetTemplate):
"""Generic data loader."""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.num_point_features = self.dataset_cfg.data_augmentor.aug_config_list[0].num_point_features
self.split = self.dataset_cfg.data_split[self.mode]
self.root_split_path = self.root_path / self.split
lidar_path = self.root_split_path / "lidar"
sample_id_list = os.listdir(lidar_path)
assert len(sample_id_list), "lidar directory is empty"
# strip .bin suffix
self.sample_id_list = [x[:-4] for x in sample_id_list]
for sid in self.sample_id_list:
if len(self.get_label(sid)) == 0:
raise IOError(
f"Got empty label for sample {sid} in {self.split} split"
", please check the dataset"
)
self.infos = []
self.include_data(self.mode)
if self.training and self.dataset_cfg.get('balanced_resampling', False):
self.infos = self.balanced_infos_resampling(self.infos)
def include_data(self, mode):
"""Inlcude data files."""
if self.logger is not None:
self.logger.info('Loading point cloud dataset')
pc_infos = []
for info_path in self.dataset_cfg.info_path[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
pc_infos.extend(infos)
self.infos.extend(pc_infos)
if self.logger is not None:
self.logger.info('Total samples for point cloud dataset: %d' % (len(pc_infos)))
def balanced_infos_resampling(self, infos):
"""
        Class-balanced resampling of the dataset, following CBGS (https://arxiv.org/abs/1908.09492).
"""
if self.class_names is None:
return infos
cls_infos = {name: [] for name in self.class_names}
for info in infos:
for name in set(info["annos"]['name']):
if name in self.class_names:
cls_infos[name].append(info)
duplicated_samples = sum([len(v) for _, v in cls_infos.items()])
cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()}
sampled_infos = []
frac = 1.0 / len(self.class_names)
ratios = [frac / v for v in cls_dist.values()]
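        # Duplicate samples of under-represented classes so each class approaches a uniform 1/num_classes share.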
for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios):
sampled_infos += np.random.choice(
cur_cls_infos, int(len(cur_cls_infos) * ratio)
).tolist()
if self.logger is not None:
self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos)))
cls_infos_new = {name: [] for name in self.class_names}
for info in sampled_infos:
for name in set(info["annos"]['name']):
if name in self.class_names:
cls_infos_new[name].append(info)
return sampled_infos
def set_split(self, split):
"""Setup train/val split."""
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
self.root_split_path = self.root_path / self.split
lidar_path = self.root_split_path / "lidar"
sample_id_list = []
if os.path.isdir(lidar_path):
sample_id_list = os.listdir(lidar_path)
else:
raise NotADirectoryError(f"{lidar_path} is not a directory")
assert len(sample_id_list), "lidar directory is empty"
# strip .bin suffix
self.sample_id_list = [x[:-4] for x in sample_id_list]
for sid in self.sample_id_list:
if len(self.get_label(sid)) == 0:
raise IOError(
f"Got empty label for sample {sid} in {split} split"
", please check the dataset"
)
def get_lidar(self, idx):
"""Get LIDAR points."""
lidar_file = self.root_split_path / 'lidar' / ('%s.bin' % idx)
assert lidar_file.exists()
points = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, self.num_point_features)
return points
def get_label(self, idx):
"""Get KITTI labels."""
label_file = self.root_split_path / 'label' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_general.get_objects_from_label(label_file)
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
"""Get statistics info."""
import concurrent.futures as futures
def process_single_scene(sample_idx):
            print(f'{self.split} sample_idx: {sample_idx}')
info = {}
pc_info = {'num_features': self.num_point_features, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
if has_label:
obj_list = self.get_label(sample_idx)
annotations = {}
annotations['name'] = np.array([obj.cls_type for obj in obj_list])
annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list]) # lhw(camera) format
annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
annotations['score'] = np.array([obj.score for obj in obj_list])
num_objects = len(obj_list)
index = list(range(num_objects))
annotations['index'] = np.array(index, dtype=np.int32)
loc = np.copy(annotations['location'])
dims = annotations['dimensions']
rots = annotations['rotation_y']
l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
gt_boxes_lidar = np.concatenate([loc, l, w, h, rots[..., np.newaxis]], axis=1)
annotations['gt_boxes_lidar'] = gt_boxes_lidar
info['annos'] = annotations
return info
sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
"""Create groundtruth database for augmentation."""
import torch
from pathlib import Path
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
infos = np.load(info_path, allow_pickle=True)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
annos = info['annos']
names = annos['name']
gt_boxes = annos['gt_boxes_lidar']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
                with open(filepath, 'wb') as f:
                    gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
"""Get template for prediction result."""
ret_dict = {
'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(batch_index, box_dict):
"""Get single prediction result."""
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(index, box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
cur_det_file = output_path / ('%s.txt' % frame_id)
with open(cur_det_file, 'w') as f:
box_lidar = single_pred_dict['boxes_lidar']
for idx in range(len(box_lidar)):
x, y, z, l, w, h, rt = box_lidar[idx]
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
% (single_pred_dict['name'][idx], 0,
0, 0, 0, 0,
h, w, l, x, y, z, rt,
single_pred_dict['score'][idx]), file=f)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
"""Evaluation of prediction results."""
if 'annos' not in self.infos[0].keys():
return None, {}
from ..kitti.kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos]
ap_result_str, ap_dict = kitti_eval.get_kitti_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
"""Length."""
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
"""Get item."""
# index = 4
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
input_dict = {
'points': points,
'frame_id': sample_idx,
}
if 'annos' in info:
annos = info['annos']
gt_names = annos['name']
gt_boxes_lidar = annos["gt_boxes_lidar"]
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
def create_pc_infos(dataset_cfg, class_names, data_path, save_path, status_logging, workers=4):
"""Create point cloud statistics for data augmentations."""
dataset = GeneralPCDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
train_split = dataset_cfg.data_split['train']
val_split = dataset_cfg.data_split['test']
train_filename = save_path / (f"infos_{train_split}.pkl")
val_filename = save_path / (f"infos_{val_split}.pkl")
trainval_filename = save_path / (f"infos_{train_split}_{val_split}.pkl")
print('---------------Start to generate data infos---------------')
status_logging.get_status_logger().write(
message="---------------Start to generate data infos---------------",
status_level=status_logging.Status.STARTED
)
dataset.set_split(train_split)
infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=False)
with open(train_filename, 'wb') as f:
pickle.dump(infos_train, f)
print(f'Info train file is saved to {train_filename}')
status_logging.get_status_logger().write(
message='Info train file is saved to %s' % train_filename,
status_level=status_logging.Status.RUNNING
)
dataset.set_split(val_split)
infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=False)
with open(val_filename, 'wb') as f:
pickle.dump(infos_val, f)
print(f'Info val file is saved to {val_filename}')
status_logging.get_status_logger().write(
message='Info val file is saved to %s' % val_filename,
status_level=status_logging.Status.RUNNING
)
with open(trainval_filename, 'wb') as f:
pickle.dump(infos_train + infos_val, f)
print(f'Info trainval file is saved to {trainval_filename}')
status_logging.get_status_logger().write(
message='Info trainval file is saved to %s' % trainval_filename,
status_level=status_logging.Status.RUNNING
)
print('---------------Start create groundtruth database for data augmentation---------------')
status_logging.get_status_logger().write(
message='---------------Start create groundtruth database for data augmentation---------------',
status_level=status_logging.Status.RUNNING
)
dataset.set_split(train_split)
dataset.create_groundtruth_database(train_filename, split=train_split)
print('---------------Data preparation Done---------------')
status_logging.get_status_logger().write(
message='---------------Data preparation Done---------------',
status_level=status_logging.Status.RUNNING
)
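# A minimal usage sketch (paths are illustrative; `status_logging` is the TAO status logging module
# passed in by the caller):
#   create_pc_infos(dataset_cfg=dataset_cfg, class_names=['Car', 'Pedestrian'],
#                   data_path=Path('/data/pointcloud'), save_path=Path('/data/pointcloud'),
#                   status_logging=status_logging, workers=4)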
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/general/pc_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic PointPillars Data Loader."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/general/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data preprocessor."""
from functools import partial
import numpy as np
from ...utils import box_utils, common_utils
tv = None
try:
import cumm.tensorview as tv
except: # noqa: E722
pass
class VoxelGeneratorWrapper():
"""Voxel Generator Wrapper."""
def __init__(self, vsize_xyz, coors_range_xyz, num_point_features, max_num_points_per_voxel, max_num_voxels):
"""Initialize."""
self.vsize_xyz = vsize_xyz
self.coors_range_xyz = coors_range_xyz
self.num_point_features = num_point_features
self.max_num_points_per_voxel = max_num_points_per_voxel
self.max_num_voxels = max_num_voxels
def generate(self, points):
"""Genrate voxels from points."""
try:
from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
spconv_ver = 1
except: # noqa: E722
try:
from spconv.utils import VoxelGenerator
spconv_ver = 1
except: # noqa: E722
from spconv.utils import Point2VoxelCPU3d as VoxelGenerator
spconv_ver = 2
if spconv_ver == 1:
_voxel_generator = VoxelGenerator(
voxel_size=self.vsize_xyz,
point_cloud_range=self.coors_range_xyz,
max_num_points=self.max_num_points_per_voxel,
max_voxels=self.max_num_voxels
)
else:
_voxel_generator = VoxelGenerator(
vsize_xyz=self.vsize_xyz,
coors_range_xyz=self.coors_range_xyz,
num_point_features=self.num_point_features,
max_num_points_per_voxel=self.max_num_points_per_voxel,
max_num_voxels=self.max_num_voxels
)
if spconv_ver == 1:
voxel_output = _voxel_generator.generate(points)
if isinstance(voxel_output, dict):
voxels, coordinates, num_points = \
voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
else:
voxels, coordinates, num_points = voxel_output
else:
assert tv is not None, "Unexpected error, library: 'cumm' wasn't imported properly."
voxel_output = _voxel_generator.point_to_voxel(tv.from_numpy(points))
tv_voxels, tv_coordinates, tv_num_points = voxel_output
# make copy with numpy(), since numpy_view() will disappear as soon as the generator is deleted
voxels = tv_voxels.numpy()
coordinates = tv_coordinates.numpy()
num_points = tv_num_points.numpy()
return voxels, coordinates, num_points
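# A minimal usage sketch of the wrapper (all values are illustrative, not taken from any shipped config):
#   gen = VoxelGeneratorWrapper(vsize_xyz=[0.16, 0.16, 4.0],
#                               coors_range_xyz=[0.0, -39.68, -3.0, 69.12, 39.68, 1.0],
#                               num_point_features=4, max_num_points_per_voxel=32,
#                               max_num_voxels=16000)
#   voxels, coords, num_points_per_voxel = gen.generate(points)  # points: (N, 4) float32 array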
class DataProcessor(object):
"""Data Processor."""
def __init__(
self, processor_configs,
point_cloud_range, training,
num_point_features
):
"""Initialize."""
self.point_cloud_range = point_cloud_range
self.training = training
self.mode = 'train' if training else 'test'
self.grid_size = self.voxel_size = None
self.data_processor_queue = []
self.num_point_features = num_point_features
func_map = {key: getattr(DataProcessor, key) for key in vars(DataProcessor) if not key.startswith("__")}
for cur_cfg in processor_configs:
if cur_cfg.name in func_map:
cur_processor = func_map[cur_cfg.name](self, config=cur_cfg)
self.data_processor_queue.append(cur_processor)
def mask_points_and_boxes_outside_range(self, data_dict=None, config=None):
"""Mask points and boxes that are out of range."""
if data_dict is None:
return partial(self.mask_points_and_boxes_outside_range, config=config)
mask = common_utils.mask_points_by_range(data_dict['points'], self.point_cloud_range)
data_dict['points'] = data_dict['points'][mask]
if data_dict.get('gt_boxes', None) is not None and config.remove_outside_boxes and self.training:
mask = box_utils.mask_boxes_outside_range_numpy(
data_dict['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1)
)
data_dict['gt_boxes'] = data_dict['gt_boxes'][mask]
return data_dict
def shuffle_points(self, data_dict=None, config=None):
"""Randomly shuffle points."""
if data_dict is None:
return partial(self.shuffle_points, config=config)
if config.shuffle[self.mode]:
points = data_dict['points']
shuffle_idx = np.random.permutation(points.shape[0])
points = points[shuffle_idx]
data_dict['points'] = points
return data_dict
def transform_points_to_voxels(self, data_dict=None, config=None, voxel_generator=None):
"""Transform points to voxels."""
if data_dict is None:
voxel_generator = VoxelGeneratorWrapper(
vsize_xyz=config.voxel_size,
coors_range_xyz=self.point_cloud_range,
num_point_features=self.num_point_features,
max_num_points_per_voxel=config.max_points_per_voxel,
max_num_voxels=config.max_number_of_voxels[self.mode],
)
grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.voxel_size)
self.grid_size = np.round(grid_size).astype(np.int64)
self.voxel_size = config.voxel_size
return partial(self.transform_points_to_voxels, voxel_generator=voxel_generator)
points = data_dict['points']
voxel_output = voxel_generator.generate(points)
voxels, coordinates, num_points = voxel_output
if not data_dict['use_lead_xyz']:
voxels = voxels[..., 3:] # remove xyz in voxels(N, 3)
data_dict['voxels'] = voxels
data_dict['voxel_coords'] = coordinates
data_dict['voxel_num_points'] = num_points
return data_dict
def sample_points(self, data_dict=None, config=None):
"""Sample points."""
if data_dict is None:
return partial(self.sample_points, config=config)
num_points = config.NUM_POINTS[self.mode]
if num_points == -1:
return data_dict
points = data_dict['points']
if num_points < len(points):
pts_depth = np.linalg.norm(points[:, 0:3], axis=1)
pts_near_flag = pts_depth < 40.0
far_idxs_choice = np.where(pts_near_flag == 0)[0]
near_idxs = np.where(pts_near_flag == 1)[0]
near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False)
choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \
if len(far_idxs_choice) > 0 else near_idxs_choice
np.random.shuffle(choice)
else:
choice = np.arange(0, len(points), dtype=np.int32)
if num_points > len(points):
extra_choice = np.random.choice(choice, num_points - len(points), replace=False)
choice = np.concatenate((choice, extra_choice), axis=0)
np.random.shuffle(choice)
data_dict['points'] = points[choice]
return data_dict
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
gt_names: optional, (N), string
...
Returns:
"""
for cur_processor in self.data_processor_queue:
data_dict = cur_processor(data_dict=data_dict)
return data_dict
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/processor/data_processor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data processor."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/processor/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Point Feature Encoder."""
import numpy as np
class PointFeatureEncoder(object):
"""Point Feature Encoder class."""
def __init__(self, config, point_cloud_range=None):
"""Initialize."""
super().__init__()
self.point_encoding_config = config
assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z']
self.used_feature_list = self.point_encoding_config.used_feature_list
self.src_feature_list = self.point_encoding_config.src_feature_list
self.point_cloud_range = point_cloud_range
@property
def num_point_features(self):
"""Number of point features."""
return getattr(self, self.point_encoding_config.encoding_type)(points=None)
def forward(self, data_dict):
"""
Args:
data_dict:
points: (N, 3 + C_in)
...
Returns:
data_dict:
points: (N, 3 + C_out),
use_lead_xyz: whether to use xyz as point-wise features
...
"""
func_map = {key: getattr(PointFeatureEncoder, key) for key in vars(PointFeatureEncoder) if not key.startswith("__") and key.endswith("_encoding")}
if self.point_encoding_config.encoding_type in func_map:
data_dict['points'], use_lead_xyz = func_map[self.point_encoding_config.encoding_type](self, data_dict['points'])
data_dict['use_lead_xyz'] = use_lead_xyz
return data_dict
return None
def absolute_coordinates_encoding(self, points=None):
"""Absolute coordinates encoding."""
if points is None:
num_output_features = len(self.used_feature_list)
return num_output_features
point_feature_list = [points[:, 0:3]]
for x in self.used_feature_list:
if x in ['x', 'y', 'z']:
continue
idx = self.src_feature_list.index(x)
point_feature_list.append(points[:, idx:idx + 1])
point_features = np.concatenate(point_feature_list, axis=1)
return point_features, True
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/processor/point_feature_encoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KITTI dataset module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/kitti/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KITTI data loader."""
import copy
import pickle
import numpy as np
from skimage import io
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti, safe_unpickler
from ..dataset import DatasetTemplate
from nvidia_tao_pytorch.core.path_utils import expand_path
class KittiDataset(DatasetTemplate):
"""KITTI dataset."""
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = self.dataset_cfg.data_split[self.mode]
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
self.kitti_infos = []
self.include_kitti_data(self.mode)
def include_kitti_data(self, mode):
"""Include KITTI data files."""
if self.logger is not None:
self.logger.info('Loading KITTI dataset')
kitti_infos = []
for info_path in self.dataset_cfg.info_path[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
kitti_infos.extend(infos)
self.kitti_infos.extend(kitti_infos)
if self.logger is not None:
self.logger.info('Total samples for KITTI dataset: %d' % (len(kitti_infos)))
def set_split(self, split):
"""Set train/val splits."""
super().__init__(
dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger
)
self.split = split
self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')
split_dir = expand_path(self.root_path / 'ImageSets' / (self.split + '.txt'))
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
"""Get LIDAR points."""
lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
assert lidar_file.exists()
return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)
def get_image_shape(self, idx):
"""Get image shape."""
img_file = self.root_split_path / 'image_2' / ('%s.png' % idx)
assert img_file.exists()
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_label(self, idx):
"""Get label."""
label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
assert label_file.exists()
return object3d_kitti.get_objects_from_label(label_file)
def get_calib(self, idx):
"""Get calibration."""
calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx)
assert calib_file.exists()
return calibration_kitti.Calibration(calib_file)
def get_road_plane(self, idx):
"""Get road plane."""
plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx)
if not plane_file.exists():
return None
with open(plane_file, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
        # Ensure the plane normal always points up (in the rectified camera coordinate system)
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib):
"""
Args:
pts_rect:
img_shape:
calib:
Returns:
"""
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
"""Get infos."""
import concurrent.futures as futures
def process_single_scene(sample_idx):
"""Process single scene."""
print(f'{self.split} sample_idx: {sample_idx}')
info = {}
pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
info['image'] = image_info
calib = self.get_calib(sample_idx)
P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0)
R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype)
R0_4x4[3, 3] = 1.
R0_4x4[:3, :3] = calib.R0
V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0)
calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4}
info['calib'] = calib_info
if has_label:
obj_list = self.get_label(sample_idx)
annotations = {}
annotations['name'] = np.array([obj.cls_type for obj in obj_list])
annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list]) # lhw(camera) format
annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
annotations['score'] = np.array([obj.score for obj in obj_list])
annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)
num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
num_gt = len(annotations['name'])
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
loc = annotations['location'][:num_objects]
dims = annotations['dimensions'][:num_objects]
rots = annotations['rotation_y'][:num_objects]
loc_lidar = calib.rect_to_lidar(loc)
l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
loc_lidar[:, 2] += h[:, 0] / 2
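                # LIDAR box layout is (x, y, z, dx=l, dy=w, dz=h, heading); the KITTI camera
                # rotation_y is converted to a LIDAR yaw via -(pi/2 + rotation_y)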
gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
annotations['gt_boxes_lidar'] = gt_boxes_lidar
info['annos'] = annotations
if count_inside_pts:
points = self.get_lidar(sample_idx)
calib = self.get_calib(sample_idx)
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib)
pts_fov = points[fov_flag]
corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar)
num_points_in_gt = -np.ones(num_gt, dtype=np.int32)
for k in range(num_objects):
flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k])
num_points_in_gt[k] = flag.sum()
annotations['num_points_in_gt'] = num_points_in_gt
return info
sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
"""Create groudtruth database."""
import torch
from pathlib import Path
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = safe_unpickler.SafeUnpickler(f.read(), KittiDataset).load() # Deserialize the data using the "SafeUnpickler" class to avoid unsafe deserializations.
# infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
bbox = annos['bbox']
gt_boxes = annos['gt_boxes_lidar']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
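                # shift the cropped points into the box-centered frame so they can be
                # re-inserted into other scenes by the GT-sampling augmentation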
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'w') as f:
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print(f'Database {k}: {len(v)}')
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
"""Get template for prediction result."""
ret_dict = {
'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples),
'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]),
'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples),
'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7])
}
return ret_dict
def generate_single_sample_dict(batch_index, box_dict):
"""Generate single prediction."""
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
calib = batch_dict['calib'][batch_index]
image_shape = batch_dict['image_shape'][batch_index]
pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib)
pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes(
pred_boxes_camera, calib, image_shape=image_shape
)
pred_dict['name'] = np.array(class_names)[pred_labels - 1]
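            # alpha (KITTI observation angle) = bearing of the box in the LIDAR x-y plane + camera rotation_y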
pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
pred_dict['bbox'] = pred_boxes_img
pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
pred_dict['location'] = pred_boxes_camera[:, 0:3]
pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
frame_id = batch_dict['frame_id'][index]
single_pred_dict = generate_single_sample_dict(index, box_dict)
single_pred_dict['frame_id'] = frame_id
annos.append(single_pred_dict)
if output_path is not None:
cur_det_file = output_path / ('%s.txt' % frame_id)
with open(cur_det_file, 'w') as f:
bbox = single_pred_dict['bbox']
loc = single_pred_dict['location']
dims = single_pred_dict['dimensions'] # lhw -> hwl
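                    # KITTI label line: type, truncated, occluded, alpha, bbox(4),
                    # dimensions as h w l, location x y z (camera frame), rotation_y, score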
for idx in range(len(bbox)):
print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
% (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0],
loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx],
single_pred_dict['score'][idx]), file=f)
return annos
def evaluation(self, det_annos, class_names, **kwargs):
"""Evaluate on dataset."""
if 'annos' not in self.kitti_infos[0].keys():
return None, {}
from .kitti_object_eval_python import eval as kitti_eval
eval_det_annos = copy.deepcopy(det_annos)
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos]
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names)
return ap_result_str, ap_dict
def __len__(self):
"""Length."""
if self._merge_all_iters_to_one_epoch:
return len(self.kitti_infos) * self.total_epochs
return len(self.kitti_infos)
def __getitem__(self, index):
"""Get item."""
# index = 4
if self._merge_all_iters_to_one_epoch:
index = index % len(self.kitti_infos)
info = copy.deepcopy(self.kitti_infos[index])
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
calib = self.get_calib(sample_idx)
img_shape = info['image']['image_shape']
if self.dataset_cfg.FOV_POINTS_ONLY:
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag = self.get_fov_flag(pts_rect, img_shape, calib)
points = points[fov_flag]
input_dict = {
'points': points,
'frame_id': sample_idx,
'calib': calib,
}
if 'annos' in info:
annos = info['annos']
annos = common_utils.drop_info_with_name(annos, name='DontCare')
loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
gt_names = annos['name']
gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)
gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib)
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
road_plane = self.get_road_plane(sample_idx)
if road_plane is not None:
input_dict['road_plane'] = road_plane
data_dict = self.prepare_data(data_dict=input_dict)
data_dict['image_shape'] = img_shape
return data_dict
def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
"""Create KITTI infos for data augmentatiions."""
dataset = KittiDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
train_split, val_split = 'train', 'val'
train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
trainval_filename = save_path / 'kitti_infos_trainval.pkl'
test_filename = save_path / 'kitti_infos_test.pkl'
print('---------------Start to generate data infos---------------')
dataset.set_split(train_split)
kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(train_filename, 'wb') as f:
pickle.dump(kitti_infos_train, f)
print(f'Kitti info train file is saved to {train_filename}')
dataset.set_split(val_split)
kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
with open(val_filename, 'wb') as f:
pickle.dump(kitti_infos_val, f)
print(f'Kitti info val file is saved to {val_filename}')
with open(trainval_filename, 'wb') as f:
pickle.dump(kitti_infos_train + kitti_infos_val, f)
print(f'Kitti info trainval file is saved to {trainval_filename}')
dataset.set_split('test')
kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
with open(test_filename, 'wb') as f:
pickle.dump(kitti_infos_test, f)
print(f'Kitti info test file is saved to {test_filename}')
print('---------------Start create groundtruth database for data augmentation---------------')
dataset.set_split(train_split)
dataset.create_groundtruth_database(train_filename, split=train_split)
print('---------------Data preparation Done---------------')
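# Example invocation (hypothetical config path):
#   python kitti_dataset.py create_kitti_infos /path/to/kitti_dataset_config.yaml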
if __name__ == '__main__':
import sys
if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
import yaml
from pathlib import Path
from easydict import EasyDict
yaml_file = expand_path(sys.argv[2])
dataset_cfg = EasyDict(yaml.load(open(yaml_file), Loader=yaml.FullLoader)) # nosec
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
create_kitti_infos(
dataset_cfg=dataset_cfg,
class_names=['Car', 'Pedestrian', 'Cyclist'],
data_path=ROOT_DIR / 'data' / 'kitti',
save_path=ROOT_DIR / 'data' / 'kitti'
)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/kitti/kitti_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utils for KITTI dataset."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/kitti/kitti_object_eval_python/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyCUDA implementation of IoU of rotated boxes."""
import numpy as np
import pycuda # noqa pylint: disable=W0611
import pycuda.autoinit # noqa pylint: disable=W0611
from pycuda.compiler import SourceModule
import pycuda.driver as cuda
pyc_dev = pycuda.autoinit.device
pyc_ctx = pyc_dev.retain_primary_context()
mod = SourceModule("""
//PyCUDA implementation of IoU of rotated boxes. This is basically a C++/CUDA
// translation of the original numpy/numba based implementation.
// area of triangle by cross product: S = |ca| * |cb| * sin(theta)/2 = ||ca x cb||/2
__device__ float trangle_area(float *a, float *b, float *c)
{
return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *
(b[0] - c[0])) / 2.0;
}
// area of polygons as the sum of areas of triangles
__device__ float area(float *int_pts, int num_of_inter)
{
float area_val = 0.0;
for (int i=0; i<num_of_inter - 2; i++)
{
area_val += fabsf(
trangle_area( int_pts, //int_pts[:2],
&int_pts[2 * i + 2],
&int_pts[2 * i + 4]));
}
return area_val;
}
// sort the vertices in a convex polygon
__device__ void sort_vertex_in_convex_polygon(float *int_pts, int num_of_inter)
{
if (num_of_inter > 0)
{
float center[2] ={0.0,0.0};
//center[:] = 0.0
for (int i=0; i<num_of_inter; i++)
{
center[0] += int_pts[2 * i];
center[1] += int_pts[2 * i + 1];
}
center[0] /= num_of_inter;
center[1] /= num_of_inter;
float v[2];
float vs[16];
for (int i=0; i<num_of_inter; i++)
{
v[0] = int_pts[2 * i] - center[0];
v[1] = int_pts[2 * i + 1] - center[1];
float d = sqrtf(v[0] * v[0] + v[1] * v[1]);
v[0] = v[0] / d;
v[1] = v[1] / d;
if (v[1] < 0)
v[0] = -2 - v[0];
vs[i] = v[0];
}
int j = 0;
float temp = 0;
        // start the insertion sort at 1 so vs[i - 1] never reads out of bounds
        for (int i=1; i<num_of_inter; i++)
{
if (vs[i - 1] > vs[i])
{
temp = vs[i];
float tx = int_pts[2 * i];
float ty = int_pts[2 * i + 1];
j = i;
while (j > 0 && (vs[j - 1] > temp) )
{
vs[j] = vs[j - 1];
int_pts[j * 2] = int_pts[j * 2 - 2];
int_pts[j * 2 + 1] = int_pts[j * 2 - 1];
j -= 1;
}
vs[j] = temp;
int_pts[j * 2] = tx;
int_pts[j * 2 + 1] = ty;
}
}
}
}
// intersection of two line segments
__device__ int line_segment_intersection(float *pts1, float *pts2, int i, int j, float *temp_pts)
{
float A[2];
float B[2];
float C[2];
float D[2];
A[0] = pts1[2 * i];
A[1] = pts1[2 * i + 1];
B[0] = pts1[2 * ((i + 1) % 4)];
B[1] = pts1[2 * ((i + 1) % 4) + 1];
C[0] = pts2[2 * j];
C[1] = pts2[2 * j + 1];
D[0] = pts2[2 * ((j + 1) % 4)];
D[1] = pts2[2 * ((j + 1) % 4) + 1];
float BA0 = B[0] - A[0];
float BA1 = B[1] - A[1];
float DA0 = D[0] - A[0];
float CA0 = C[0] - A[0];
float DA1 = D[1] - A[1];
float CA1 = C[1] - A[1];
float acd = DA1 * CA0 > CA1 * DA0;
float bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]);
if (acd != bcd)
{
float abc = CA1 * BA0 > BA1 * CA0;
float abd = DA1 * BA0 > BA1 * DA0;
if (abc != abd)
{
float DC0 = D[0] - C[0];
float DC1 = D[1] - C[1];
float ABBA = A[0] * B[1] - B[0] * A[1];
float CDDC = C[0] * D[1] - D[0] * C[1];
float DH = BA1 * DC0 - BA0 * DC1;
float Dx = ABBA * DC0 - BA0 * CDDC;
float Dy = ABBA * DC1 - BA1 * CDDC;
temp_pts[0] = Dx / DH;
temp_pts[1] = Dy / DH;
return 1;
}
}
return 0;
}
// whether point q is on line segment of (p1, p2) and in between the 2 points on the line
__device__ int on_segment(float p1_x, float p1_y, float p2_x, float p2_y, float q_x, float q_y)
{
return (
( (q_x - p1_x) * (p2_y - p1_y) == (p2_x - p1_x) * (q_y - p1_y) ) &&
( min(p1_x, p2_x) <= q_x ) &&
( q_x <= max(p1_x, p2_x) ) &&
( min(p1_y, p2_y) <= q_y ) &&
( q_y <= max(p1_y, p2_y) )
);
}
// whether a point is in a quadrilateral
__device__ int in_quadrilateral(float pt_x, float pt_y, float *corners)
{
int flag = 0;
int j=0;
float a_x, a_y, b_x, b_y;
for (int i=0; i<4; i++)
{
j = (i + 1) % 4;
a_x = corners[2 * i];
a_y = corners[2 * i + 1];
b_x = corners[2 * j];
b_y = corners[2 * j + 1];
if (on_segment(a_x, a_y, b_x, b_y, pt_x, pt_y))
return 1;
if (
(((a_y - pt_y) > 0) != ((b_y - pt_y) > 0)) &&
pt_x - (pt_y - a_y) * (a_x - b_x) / (a_y - b_y) - a_x < 0
)
flag = ! flag;
}
return flag;
}
// intersection of 2 quadrilaterals
__device__ int quadrilateral_intersection(float *pts1, float *pts2, float *int_pts)
{
int num_of_inter = 0;
float temp_pts[2];
int has_pts;
for (int i=0; i< 4; i++)
{
if ( in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2) )
{
int_pts[num_of_inter * 2] = pts1[2 * i];
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1];
num_of_inter += 1;
}
if ( in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1) )
{
int_pts[num_of_inter * 2] = pts2[2 * i];
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1];
num_of_inter += 1;
}
}
for (int i=0; i<4; i++)
{
for (int j=0; j<4; j++)
{
            has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts);
if (has_pts)
{
int_pts[num_of_inter * 2] = temp_pts[0];
int_pts[num_of_inter * 2 + 1] = temp_pts[1];
num_of_inter += 1;
}
}
}
return num_of_inter;
}
// convert rotated boxes to corners format
__device__ void rbbox_to_corners(float *corners, float *rbbox)
{
float angle = rbbox[4];
float a_cos = cosf(angle);
float a_sin = sinf(angle);
float center_x = rbbox[0];
float center_y = rbbox[1];
float x_d = rbbox[2];
float y_d = rbbox[3];
float corners_x[4];
float corners_y[4];
corners_x[0] = -x_d / 2;
corners_x[1] = -x_d / 2;
corners_x[2] = x_d / 2;
corners_x[3] = x_d / 2;
corners_y[0] = -y_d / 2;
corners_y[1] = y_d / 2;
corners_y[2] = y_d / 2;
corners_y[3] = -y_d / 2;
for (int i=0; i<4; i++)
{
corners[2 *
i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x;
corners[2 * i
+ 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y;
}
}
// intersection of 2 rotated boxes
__device__ float inter(float *rbbox1, float *rbbox2)
{
float corners1[8];
float corners2[8];
float intersection_corners[16];
int num_intersection;
rbbox_to_corners(corners1, rbbox1);
rbbox_to_corners(corners2, rbbox2);
num_intersection = quadrilateral_intersection(corners1, corners2,
intersection_corners);
sort_vertex_in_convex_polygon(intersection_corners, num_intersection);
return area(intersection_corners, num_intersection);
}
// compute IoU of a pair of rotated boxes
__device__ float devRotateIoUEval(float *rbox1, float *rbox2, int criterion)
{
//centerx, centery, widthx, heighty, angle
float area1 = rbox1[2] * rbox1[3];
float area2 = rbox2[2] * rbox2[3];
float area_inter = inter(rbox1, rbox2);
if (criterion == -1)
return area_inter / (area1 + area2 - area_inter);
if (criterion == 0)
return area_inter / area1;
if (criterion == 1)
return area_inter / area2;
return area_inter;
}
// CUDA kernel to compute IoU of multiple rotated boxes
__global__ void rotate_iou_gpu_eval(float *q, float *b, int q_max, int b_max, float *output, int criterion=-1)
{
const int block_size=64;
const int point_len = 5;
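    // tile the computation: each block caches up to block_size boxes from b and from q
    // in shared memory, then each thread fills one row of the per-tile IoU output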
extern __shared__ float block_boxes[block_size*point_len];
extern __shared__ float block_qboxes[block_size*point_len];
int block_row_index=blockIdx.x;
int block_col_index=blockIdx.y;
int tx=threadIdx.x;
float *b_addr_this_block=b + block_row_index * block_size * point_len;
float *q_addr_this_block=q + block_col_index * block_size * point_len;
int b_valid_len_this_block = b_max - block_row_index*block_size < block_size ? (b_max - block_row_index*block_size) : block_size;
int q_valid_len_this_block = q_max - block_col_index*block_size < block_size ? (q_max - block_col_index*block_size) : block_size;
if (tx < b_valid_len_this_block)
{
for (int i=0; i<point_len; i++)
{
block_boxes[tx * point_len + i] = b_addr_this_block[tx * point_len + i];
}
}
if (tx < q_valid_len_this_block)
{
for (int i=0; i<point_len; i++)
{
block_qboxes[tx * point_len + i] = q_addr_this_block[tx * point_len + i];
}
}
__syncthreads();
if (tx<b_valid_len_this_block)
{
int rows_index=block_row_index * block_size + tx;
int cols_index=block_col_index * block_size;
for (int i=0;i<q_valid_len_this_block;i++)
{
(output + rows_index* q_max + cols_index + i)[0] = devRotateIoUEval(&block_boxes[point_len*tx], &block_qboxes[point_len * i], criterion);
}
}
}
""")
def div_up(m, n):
"""Division with round to up."""
return m // n + (m % n > 0)
def rotate_iou_gpu_eval(box_np, query_np, criterion=-1):
"""IoU of rotated boxes."""
pyc_ctx.push()
box_np = box_np.astype(np.float32, order='C')
query_np = query_np.astype(np.float32, order='C')
N = box_np.shape[0]
K = query_np.shape[0]
iou_np = np.zeros((N, K), dtype=np.float32, order='C')
threadsPerBlock = 8 * 8
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
func = mod.get_function("rotate_iou_gpu_eval")
func(
cuda.In(query_np),
cuda.In(box_np),
np.int32(K),
np.int32(N),
cuda.Out(iou_np),
np.int32(criterion),
grid=blockspergrid,
block=(threadsPerBlock, 1, 1)
)
pyc_ctx.pop()
return iou_np
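# Minimal usage sketch (illustrative values only; requires a CUDA-capable GPU):
#   boxes = np.array([[0., 0., 4., 2., 0.]], dtype=np.float32)          # (cx, cy, w, h, angle)
#   queries = np.array([[0., 0., 4., 2., np.pi / 6]], dtype=np.float32)
#   rotate_iou_gpu_eval(boxes, queries)                                 # -> np.ndarray of shape (1, 1)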
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou_pycuda.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utils for KITTI data loader."""
import concurrent.futures as futures
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
"""Get image index string."""
return "{:06d}".format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True):
"""Get KITTI info path."""
img_idx_str = get_image_index_str(idx)
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if not (prefix / file_path).exists():
raise ValueError("file not exist: {}".format(file_path))
if relative_path:
return str(file_path)
return str(prefix / file_path)
def get_image_path(idx, prefix, training=True, relative_path=True):
"""Get image path."""
return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,
relative_path)
def get_label_path(idx, prefix, training=True, relative_path=True):
"""Get label path."""
return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,
relative_path)
def get_velodyne_path(idx, prefix, training=True, relative_path=True):
"""Get velodyne path."""
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path)
def get_calib_path(idx, prefix, training=True, relative_path=True):
"""Get calibration path."""
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path)
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
"""Get KITTI image info."""
root_path = pathlib.Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
"""Map function."""
image_info = {'image_idx': idx}
annotations = None
if velodyne:
image_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['img_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array(
[float(info) for info in lines[0].split(' ')[1:13]]).reshape(
[3, 4])
P1 = np.array(
[float(info) for info in lines[1].split(' ')[1:13]]).reshape(
[3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
P3 = np.array(
[float(info) for info in lines[3].split(' ')[1:13]]).reshape(
[3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
add_difficulty_to_annos(image_info)
return image_info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
def filter_kitti_anno(image_anno,
used_classes,
used_difficulty=None,
dontcare_iou=None):
"""Filter KITTI annotations."""
if not isinstance(used_classes, (list, tuple)):
used_classes = [used_classes]
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x in used_classes
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
if used_difficulty is not None:
relevant_annotation_indices = [
i for i, x in enumerate(img_filtered_annotations['difficulty'])
if x in used_difficulty
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][relevant_annotation_indices])
if 'DontCare' in used_classes and dontcare_iou is not None:
dont_care_indices = [
i for i, x in enumerate(img_filtered_annotations['name'])
if x == 'DontCare'
]
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = img_filtered_annotations['bbox']
ious = iou(all_boxes, all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
for key in image_anno.keys():
img_filtered_annotations[key] = (img_filtered_annotations[key][
np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def filter_annos_low_score(image_annos, thresh):
"""Filter annotations with low score."""
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(anno['score']) if s >= thresh
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def kitti_result_line(result_dict, precision=4):
"""KITTI result line."""
prec_float = "{" + ":.{}f".format(precision) + "}"
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', None),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError("you must specify a value for {}".format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError("unknown key. supported key:{}".format(
res_dict.keys()))
return ' '.join(res_line)
def add_difficulty_to_annos(info):
"""Add difficulty to annotations."""
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for eval_utils
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for eval_utils
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos["difficulty"] = np.array(diff, np.int32)
return diff
def get_label_anno(label_path):
"""Get label annotation."""
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
annotations['name'] = np.array([x[0] for x in content])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array(
[[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array(
[[float(info) for info in x[8:11]] for x in content]).reshape(
-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array(
[[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array(
[float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros([len(annotations['bbox'])])
return annotations
def get_label_annos(label_folder, image_ids=None):
"""Get label annotations."""
if image_ids is None:
filepaths = pathlib.Path(label_folder).glob('*.txt')
        prog = re.compile(r'^\d{6}\.txt$')
filepaths = filter(lambda f: prog.match(f.name), filepaths)
image_ids = [int(p.stem) for p in filepaths]
image_ids = sorted(image_ids)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
annos = []
label_folder = pathlib.Path(label_folder)
for idx in image_ids:
image_idx = get_image_index_str(idx)
label_filename = label_folder / (image_idx + '.txt')
annos.append(get_label_anno(label_filename))
return annos
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
        a numpy array with shape [N] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
        a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
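# Minimal usage sketch (illustrative axis-aligned boxes):
#   b1 = np.array([[0., 0., 10., 10.]])
#   b2 = np.array([[5., 5., 15., 15.]])
#   iou(b1, b2)  # -> array([[0.14285714]]) : 25 overlap / 175 union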
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utils for PointPillars."""
import fire
from . import kitti_common as kitti
from .eval import get_coco_eval_result, get_official_eval_result
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def evaluate(label_path,
result_path,
label_split_file,
current_class=0,
coco=False,
score_thresh=-1):
"""Do evaluation."""
dt_annos = kitti.get_label_annos(result_path)
if score_thresh > 0:
dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
val_image_ids = _read_imageset_file(label_split_file)
gt_annos = kitti.get_label_annos(label_path, val_image_ids)
if coco:
return get_coco_eval_result(gt_annos, dt_annos, current_class)
return get_official_eval_result(gt_annos, dt_annos, current_class)
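# Example invocation via python-fire (hypothetical paths):
#   python evaluate.py evaluate --label_path=/data/kitti/training/label_2 \
#       --result_path=/results/pred_labels --label_split_file=/data/kitti/ImageSets/val.txt \
#       --current_class=0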
if __name__ == '__main__':
fire.Fire()
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utils."""
import io as sysio
import numpy as np
from .rotate_iou_pycuda import rotate_iou_gpu_eval
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
"""Get thresholds."""
scores.sort()
scores = scores[::-1]
current_recall = 0
thresholds = []
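    # walk detections in descending score order and keep a threshold roughly every
    # 1 / (num_sample_pts - 1) step of recall (41-point AP sampling)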
for i, score in enumerate(scores):
l_recall = (i + 1) / num_gt
if i < (len(scores) - 1):
r_recall = (i + 2) / num_gt
else:
r_recall = l_recall
if (((r_recall - current_recall) < (current_recall - l_recall)) and (i < (len(scores) - 1))):
continue
# recall = l_recall
thresholds.append(score)
current_recall += 1 / (num_sample_pts - 1.0)
return thresholds
def clean_data(gt_anno, dt_anno, current_class, difficulty):
"""Clean data."""
CLASS_NAMES = ['car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'truck']
MIN_HEIGHT = [40, 25, 25]
MAX_OCCLUSION = [0, 1, 2]
MAX_TRUNCATION = [0.15, 0.3, 0.5]
dc_bboxes, ignored_gt, ignored_dt = [], [], []
current_cls_name = CLASS_NAMES[current_class].lower()
num_gt = len(gt_anno["name"])
num_dt = len(dt_anno["name"])
num_valid_gt = 0
for i in range(num_gt):
bbox = gt_anno["bbox"][i]
gt_name = gt_anno["name"][i].lower()
height = bbox[3] - bbox[1]
valid_class = -1
if (gt_name == current_cls_name):
valid_class = 1
elif (current_cls_name == "Pedestrian".lower() and "Person_sitting".lower() == gt_name):
valid_class = 0
elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name):
valid_class = 0
else:
valid_class = -1
ignore = False
if ((gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty]) or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty]) or (height <= MIN_HEIGHT[difficulty])):
# if gt_anno["difficulty"][i] > difficulty or gt_anno["difficulty"][i] == -1:
ignore = True
if valid_class == 1 and not ignore:
ignored_gt.append(0)
num_valid_gt += 1
elif (valid_class == 0 or (ignore and (valid_class == 1))):
ignored_gt.append(1)
else:
ignored_gt.append(-1)
# for i in range(num_gt):
if gt_anno["name"][i] == "DontCare":
dc_bboxes.append(gt_anno["bbox"][i])
for i in range(num_dt):
if (dt_anno["name"][i].lower() == current_cls_name):
valid_class = 1
else:
valid_class = -1
height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
if height < MIN_HEIGHT[difficulty]:
ignored_dt.append(1)
elif valid_class == 1:
ignored_dt.append(0)
else:
ignored_dt.append(-1)
return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
def clean_data_kitti(gt_anno, dt_anno, current_class, difficulty):
"""Clean KITTI data."""
dc_bboxes, ignored_gt, ignored_dt = [], [], []
num_gt = len(gt_anno["name"])
num_dt = len(dt_anno["name"])
num_valid_gt = 0
for i in range(num_gt):
gt_name = gt_anno["name"][i]
valid_class = -1
if (gt_name == current_class):
valid_class = 1
else:
valid_class = -1
ignore = False
if valid_class == 1 and not ignore:
ignored_gt.append(0)
num_valid_gt += 1
elif (valid_class == 0 or (ignore and (valid_class == 1))):
ignored_gt.append(1)
else:
ignored_gt.append(-1)
for i in range(num_dt):
if (dt_anno["name"][i] == current_class):
valid_class = 1
else:
valid_class = -1
if valid_class == 1:
ignored_dt.append(0)
else:
ignored_dt.append(-1)
return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
def image_box_overlap(boxes, query_boxes, criterion=-1):
"""Image box overlap."""
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
(query_boxes[k, 3] - query_boxes[k, 1]))
for n in range(N):
iw = (min(boxes[n, 2], query_boxes[k, 2]) -
max(boxes[n, 0], query_boxes[k, 0]))
if iw > 0:
ih = (min(boxes[n, 3], query_boxes[k, 3]) -
max(boxes[n, 1], query_boxes[k, 1]))
if ih > 0:
if criterion == -1:
ua = (
(boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
elif criterion == 0:
ua = ((boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]))
elif criterion == 1:
ua = qbox_area
else:
ua = 1.0
overlaps[n, k] = iw * ih / ua
return overlaps
def bev_box_overlap(boxes, qboxes, criterion=-1):
"""BEV box overlap."""
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou
def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):
"""3D box overlap kernel."""
    # ONLY supports overlap in CAMERA coordinates, not LIDAR.
N, K = boxes.shape[0], qboxes.shape[0]
for i in range(N):
for j in range(K):
if rinc[i, j] > 0:
# iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] +
# qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1]))
iw = (min(boxes[i, 1], qboxes[j, 1]) - max(
boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4]))
if iw > 0:
area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
inc = iw * rinc[i, j]
if criterion == -1:
ua = (area1 + area2 - inc)
elif criterion == 0:
ua = area1
elif criterion == 1:
ua = area2
else:
ua = inc
rinc[i, j] = inc / ua
else:
rinc[i, j] = 0.0
def d3_box_overlap_kernel_lidar(boxes, qboxes, rinc, criterion=-1):
"""3D box overlap kernel of LIDAR."""
N, K = boxes.shape[0], qboxes.shape[0]
for i in range(N):
for j in range(K):
if rinc[i, j] > 0:
iw = (min(boxes[i, 2], qboxes[j, 2]) - max(
boxes[i, 2] - boxes[i, 5], qboxes[j, 2] - qboxes[j, 5]))
if iw > 0:
area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
inc = iw * rinc[i, j]
if criterion == -1:
ua = (area1 + area2 - inc)
elif criterion == 0:
ua = area1
elif criterion == 1:
ua = area2
else:
ua = inc
rinc[i, j] = inc / ua
else:
rinc[i, j] = 0.0
def d3_box_overlap(boxes, qboxes, criterion=-1):
"""3D box overlap."""
rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]],
qboxes[:, [0, 2, 3, 5, 6]], 2)
d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)
return rinc
def d3_box_overlap_lidar(boxes, qboxes, criterion=-1):
"""3D box overlap of LIDAR."""
rinc = rotate_iou_gpu_eval(boxes[:, [0, 1, 3, 4, 6]],
qboxes[:, [0, 1, 3, 4, 6]], 2)
d3_box_overlap_kernel_lidar(boxes, qboxes, rinc, criterion)
return rinc
def compute_statistics_jit(overlaps,
gt_datas,
dt_datas,
ignored_gt,
ignored_det,
dc_bboxes,
metric,
min_overlap,
thresh=0,
compute_fp=False,
compute_aos=False):
"""Compute statistics."""
det_size = dt_datas.shape[0]
gt_size = gt_datas.shape[0]
dt_scores = dt_datas[:, -1]
dt_alphas = dt_datas[:, 4]
gt_alphas = gt_datas[:, 4]
dt_bboxes = dt_datas[:, :4]
assigned_detection = [False] * det_size
ignored_threshold = [False] * det_size
if compute_fp:
for i in range(det_size):
if (dt_scores[i] < thresh):
ignored_threshold[i] = True
NO_DETECTION = -10000000
tp, fp, fn, similarity = 0, 0, 0, 0
# thresholds = [0.0]
# delta = [0.0]
thresholds = np.zeros((gt_size, ))
thresh_idx = 0
delta = np.zeros((gt_size, ))
delta_idx = 0
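    # greedy matching: every non-ignored GT looks for a detection above min_overlap;
    # matched pairs count as TP, unmatched valid GTs as FN, and (with compute_fp)
    # leftover unassigned detections become FP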
for i in range(gt_size):
if ignored_gt[i] == -1:
continue
det_idx = -1
valid_detection = NO_DETECTION
max_overlap = 0
assigned_ignored_det = False
for j in range(det_size):
if (ignored_det[j] == -1):
continue
if (assigned_detection[j]):
continue
if (ignored_threshold[j]):
continue
overlap = overlaps[j, i]
dt_score = dt_scores[j]
if (not compute_fp and (overlap > min_overlap) and dt_score > valid_detection):
det_idx = j
valid_detection = dt_score
elif (compute_fp and (overlap > min_overlap) and (overlap > max_overlap or assigned_ignored_det) and ignored_det[j] == 0):
max_overlap = overlap
det_idx = j
valid_detection = 1
assigned_ignored_det = False
elif (compute_fp and (overlap > min_overlap) and (valid_detection == NO_DETECTION) and ignored_det[j] == 1):
det_idx = j
valid_detection = 1
assigned_ignored_det = True
if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
fn += 1
elif ((valid_detection != NO_DETECTION) and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
assigned_detection[det_idx] = True
elif valid_detection != NO_DETECTION:
tp += 1
# thresholds.append(dt_scores[det_idx])
thresholds[thresh_idx] = dt_scores[det_idx]
thresh_idx += 1
if compute_aos:
# delta.append(gt_alphas[i] - dt_alphas[det_idx])
delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
delta_idx += 1
assigned_detection[det_idx] = True
if compute_fp:
for i in range(det_size):
if (not (assigned_detection[i] or ignored_det[i] == -1 or ignored_det[i] == 1 or ignored_threshold[i])):
fp += 1
nstuff = 0
if metric == 0:
overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
for i in range(dc_bboxes.shape[0]):
for j in range(det_size):
if (assigned_detection[j]):
continue
if (ignored_det[j] == -1 or ignored_det[j] == 1):
continue
if (ignored_threshold[j]):
continue
if overlaps_dt_dc[j, i] > min_overlap:
assigned_detection[j] = True
nstuff += 1
fp -= nstuff
if compute_aos:
tmp = np.zeros((fp + delta_idx, ))
# tmp = [0] * fp
for i in range(delta_idx):
tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
# tmp.append((1.0 + np.cos(delta[i])) / 2.0)
# assert len(tmp) == fp + tp
# assert len(delta) == tp
if tp > 0 or fp > 0:
similarity = np.sum(tmp)
else:
similarity = -1
return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
"""Get splits."""
same_part = num // num_part
remain_num = num % num_part
if same_part == 0:
return [num]
if remain_num == 0:
return [same_part] * num_part
return [same_part] * num_part + [remain_num]
def fused_compute_statistics(overlaps,
pr,
gt_nums,
dt_nums,
dc_nums,
gt_datas,
dt_datas,
dontcares,
ignored_gts,
ignored_dets,
metric,
min_overlap,
thresholds,
compute_aos=False):
"""Fused compute statistics."""
gt_num = 0
dt_num = 0
dc_num = 0
for i in range(gt_nums.shape[0]):
for t, thresh in enumerate(thresholds):
overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:
gt_num + gt_nums[i]]
gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
tp, fp, fn, similarity, _ = compute_statistics_jit(
overlap,
gt_data,
dt_data,
ignored_gt,
ignored_det,
dontcare,
metric,
min_overlap=min_overlap,
thresh=thresh,
compute_fp=True,
compute_aos=compute_aos)
pr[t, 0] += tp
pr[t, 1] += fp
pr[t, 2] += fn
if similarity != -1:
pr[t, 3] += similarity
gt_num += gt_nums[i]
dt_num += dt_nums[i]
dc_num += dc_nums[i]
def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):
"""fast iou algorithm. this function can be used independently to
do result analysis. Must be used in CAMERA coordinate system.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
metric: eval type. 0: bbox, 1: bev, 2: 3d
num_parts: int. a parameter for fast calculate algorithm
"""
assert len(gt_annos) == len(dt_annos)
total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
parted_overlaps = []
example_idx = 0
for num_part in split_parts:
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
if metric == 0:
gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
overlap_part = image_box_overlap(gt_boxes, dt_boxes)
elif metric == 1:
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in gt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in dt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
elif metric == 2:
loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
else:
raise ValueError("unknown metric")
parted_overlaps.append(overlap_part)
example_idx += num_part
overlaps = []
example_idx = 0
for j, num_part in enumerate(split_parts):
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
gt_num_idx, dt_num_idx = 0, 0
for i in range(num_part):
gt_box_num = total_gt_num[example_idx + i]
dt_box_num = total_dt_num[example_idx + i]
overlaps.append(
parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,
dt_num_idx:dt_num_idx + dt_box_num])
gt_num_idx += gt_box_num
dt_num_idx += dt_box_num
example_idx += num_part
return overlaps, parted_overlaps, total_gt_num, total_dt_num
def calculate_iou_partly_kitti(dt_annos, gt_annos, metric, num_parts=50):
"""fast iou algorithm. this function can be used independently to
do result analysis. Must be used in CAMERA coordinate system.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
metric: eval type. 0: bbox, 1: bev, 2: 3d
num_parts: int. a parameter for fast calculate algorithm
"""
assert len(gt_annos) == len(dt_annos)
total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
parted_overlaps = []
example_idx = 0
for num_part in split_parts:
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
if metric == 1:
dt_boxes_lidar = np.concatenate(
[a["boxes_lidar"][:, [0, 1, 3, 4, 6]] for a in dt_annos_part], 0
)
dt_boxes_lidar[:, -1] *= -1
gt_boxes_lidar = np.concatenate(
[a["gt_boxes_lidar"][:, [0, 1, 3, 4, 6]] for a in gt_annos_part], 0
)
gt_boxes_lidar[:, -1] *= -1
overlap_part = bev_box_overlap(dt_boxes_lidar, gt_boxes_lidar).astype(
np.float64)
elif metric == 2:
dt_boxes_lidar = np.concatenate(
[a["boxes_lidar"] for a in dt_annos_part], 0
)
dt_boxes_lidar[:, -1] *= -1
gt_boxes_lidar = np.concatenate(
[a["gt_boxes_lidar"] for a in gt_annos_part], 0
)
gt_boxes_lidar[:, -1] *= -1
overlap_part = d3_box_overlap_lidar(dt_boxes_lidar, gt_boxes_lidar).astype(
np.float64)
else:
raise ValueError("unknown metric")
parted_overlaps.append(overlap_part)
example_idx += num_part
overlaps = []
example_idx = 0
for j, num_part in enumerate(split_parts):
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
gt_num_idx, dt_num_idx = 0, 0
for i in range(num_part):
dt_box_num = total_dt_num[example_idx + i]
gt_box_num = total_gt_num[example_idx + i]
overlaps.append(
parted_overlaps[j][dt_num_idx:dt_num_idx + dt_box_num,
gt_num_idx:gt_num_idx + gt_box_num])
gt_num_idx += gt_box_num
dt_num_idx += dt_box_num
example_idx += num_part
return overlaps, parted_overlaps, total_dt_num, total_gt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
gt_datas_list = []
dt_datas_list = []
total_dc_num = []
ignored_gts, ignored_dets, dontcares = [], [], []
total_num_valid_gt = 0
for i in range(len(gt_annos)):
rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)
num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets
ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
ignored_dets.append(np.array(ignored_det, dtype=np.int64))
if len(dc_bboxes) == 0:
dc_bboxes = np.zeros((0, 4)).astype(np.float64)
else:
dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
total_dc_num.append(dc_bboxes.shape[0])
dontcares.append(dc_bboxes)
total_num_valid_gt += num_valid_gt
gt_datas = np.concatenate(
[gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1)
dt_datas = np.concatenate([
dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis],
dt_annos[i]["score"][..., np.newaxis]
], 1)
gt_datas_list.append(gt_datas)
dt_datas_list.append(dt_datas)
total_dc_num = np.stack(total_dc_num, axis=0)
return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
total_dc_num, total_num_valid_gt)
def _prepare_data_kitti(gt_annos, dt_annos, current_class, difficulty):
gt_datas_list = []
dt_datas_list = []
total_dc_num = []
ignored_gts, ignored_dets, dontcares = [], [], []
total_num_valid_gt = 0
for i in range(len(gt_annos)):
rets = clean_data_kitti(gt_annos[i], dt_annos[i], current_class, difficulty)
num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets
ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
ignored_dets.append(np.array(ignored_det, dtype=np.int64))
if len(dc_bboxes) == 0:
dc_bboxes = np.zeros((0, 4)).astype(np.float64)
else:
dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
total_dc_num.append(dc_bboxes.shape[0])
dontcares.append(dc_bboxes)
total_num_valid_gt += num_valid_gt
gt_datas = np.concatenate(
[gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1)
dt_datas = np.concatenate([
dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis],
dt_annos[i]["score"][..., np.newaxis]
], 1)
gt_datas_list.append(gt_datas)
dt_datas_list.append(dt_datas)
total_dc_num = np.stack(total_dc_num, axis=0)
return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
total_dc_num, total_num_valid_gt)
def eval_class(gt_annos,
dt_annos,
current_classes,
difficultys,
metric,
min_overlaps,
compute_aos=False,
num_parts=100):
"""Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist
difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlaps: float, min overlap. format: [num_overlap, metric, class].
num_parts: int. a parameter for fast calculate algorithm
Returns:
dict of recall, precision and aos
"""
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)
overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for m, current_class in enumerate(current_classes):
for l, difficulty in enumerate(difficultys): # noqa: E741
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
dontcares, total_dc_num, total_num_valid_gt) = rets
for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(
overlaps[i],
gt_datas_list[i],
dt_datas_list[i],
ignored_gts[i],
ignored_dets[i],
dontcares[i],
metric,
min_overlap=min_overlap,
thresh=0.0,
compute_fp=False)
thresholds = rets[-1]
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for j, num_part in enumerate(split_parts):
gt_datas_part = np.concatenate(
gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(
dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(
dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(
ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(
ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(
parted_overlaps[j],
pr,
total_gt_num[idx:idx + num_part],
total_dt_num[idx:idx + num_part],
total_dc_num[idx:idx + num_part],
gt_datas_part,
dt_datas_part,
dc_datas_part,
ignored_gts_part,
ignored_dets_part,
metric,
min_overlap=min_overlap,
thresholds=thresholds,
compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(
precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {
"recall": recall,
"precision": precision,
"orientation": aos,
}
return ret_dict
def eval_class_kitti(gt_annos,
dt_annos,
current_classes,
difficultys,
metric,
min_overlaps,
compute_aos=False,
num_parts=100):
"""Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_classes: list of class names.
difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlaps: float, min overlap. format: [num_overlap, metric, class].
num_parts: int. a parameter for fast calculate algorithm
Returns:
dict of recall, precision and aos
"""
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly_kitti(dt_annos, gt_annos, metric, num_parts)
overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for m, current_class in enumerate(current_classes):
for l, difficulty in enumerate(difficultys): # noqa: E741
rets = _prepare_data_kitti(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
dontcares, total_dc_num, total_num_valid_gt) = rets
for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(
overlaps[i],
gt_datas_list[i],
dt_datas_list[i],
ignored_gts[i],
ignored_dets[i],
dontcares[i],
metric,
min_overlap=min_overlap,
thresh=0.0,
compute_fp=False)
thresholds = rets[-1]
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for j, num_part in enumerate(split_parts):
gt_datas_part = np.concatenate(
gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(
dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(
dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(
ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(
ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(
parted_overlaps[j],
pr,
total_gt_num[idx:idx + num_part],
total_dt_num[idx:idx + num_part],
total_dc_num[idx:idx + num_part],
gt_datas_part,
dt_datas_part,
dc_datas_part,
ignored_gts_part,
ignored_dets_part,
metric,
min_overlap=min_overlap,
thresholds=thresholds,
compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(
precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {
"recall": recall,
"precision": precision,
"orientation": aos,
}
return ret_dict
def get_mAP(prec):
"""Get mAP."""
sums = 0
for i in range(0, prec.shape[-1], 4):
sums = sums + prec[..., i]
return sums / 11 * 100
def get_mAP_R40(prec):
"""Get mAP of R40."""
sums = 0
for i in range(1, prec.shape[-1]):
sums = sums + prec[..., i]
return sums / 40 * 100
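# Note on the two sampling schemes above (derived from the loops, not from an
# external reference): `prec` holds 41 recall sample points (recall = 0.0, 0.025, ..., 1.0).
# get_mAP averages every 4th point (indices 0, 4, ..., 40 -> 11 points at recall
# 0.0, 0.1, ..., 1.0), while get_mAP_R40 averages indices 1..40 (40 points at
# recall 0.025, ..., 1.0), i.e. the commonly used R11 and R40 protocols.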
def print_str(value, *arg, sstream=None):
"""Print string."""
if sstream is None:
sstream = sysio.StringIO()
sstream.truncate(0)
sstream.seek(0)
print(value, *arg, file=sstream)
return sstream.getvalue()
def do_eval(gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos=False,
PR_detail_dict=None):
"""Do evaluation."""
# min_overlaps: [num_minoverlap, metric, num_class]
difficultys = [0, 1, 2]
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,
min_overlaps, compute_aos)
# ret: [num_class, num_diff, num_minoverlap, num_sample_points]
mAP_bbox = get_mAP(ret["precision"])
mAP_bbox_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['bbox'] = ret['precision']
mAP_aos = mAP_aos_R40 = None
if compute_aos:
mAP_aos = get_mAP(ret["orientation"])
mAP_aos_R40 = get_mAP_R40(ret["orientation"])
if PR_detail_dict is not None:
PR_detail_dict['aos'] = ret['orientation']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,
min_overlaps)
mAP_bev = get_mAP(ret["precision"])
mAP_bev_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,
min_overlaps)
mAP_3d = get_mAP(ret["precision"])
mAP_3d_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40
def do_eval_kitti(gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos=False,
PR_detail_dict=None):
"""Do KITTI evaluation."""
# min_overlaps: [num_minoverlap, metric, num_class]
difficultys = [0]
ret = eval_class_kitti(gt_annos, dt_annos, current_classes, difficultys, 1, min_overlaps)
mAP_bev = get_mAP(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
ret = eval_class_kitti(gt_annos, dt_annos, current_classes, difficultys, 2, min_overlaps)
mAP_3d = get_mAP(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
return mAP_bev, mAP_3d
def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,
compute_aos):
"""Do COCO style evaluation."""
# overlap_ranges: [range, metric, num_class]
min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
for i in range(overlap_ranges.shape[1]):
for j in range(overlap_ranges.shape[2]):
            min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:2, i, j], int(overlap_ranges[2, i, j]))
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos, _, _, _, _ = do_eval(
        gt_annos, dt_annos, current_classes, min_overlaps, compute_aos)
# ret: [num_class, num_diff, num_minoverlap]
mAP_bbox = mAP_bbox.mean(-1)
mAP_bev = mAP_bev.mean(-1)
mAP_3d = mAP_3d.mean(-1)
if mAP_aos is not None:
mAP_aos = mAP_aos.mean(-1)
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def get_official_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None):
"""Get KITTI official KPI."""
overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7]])
overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5]])
    min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0)  # [2, 3, 6]
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
5: 'Truck'
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
min_overlaps = min_overlaps[:, :, current_classes]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40 = do_eval(
gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict)
ret_dict = {}
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
for i in range(min_overlaps.shape[0]):
result += print_str(
(f"{class_to_name[curcls]} "
"AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox[j, 0, i]:.4f}, "
f"{mAPbbox[j, 1, i]:.4f}, "
f"{mAPbbox[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev[j, 0, i]:.4f}, "
f"{mAPbev[j, 1, i]:.4f}, "
f"{mAPbev[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d[j, 0, i]:.4f}, "
f"{mAP3d[j, 1, i]:.4f}, "
f"{mAP3d[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos[j, 0, i]:.2f}, "
f"{mAPaos[j, 1, i]:.2f}, "
f"{mAPaos[j, 2, i]:.2f}"))
result += print_str(
(f"{class_to_name[curcls]} "
"AP_R40@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, "
f"{mAPbbox_R40[j, 1, i]:.4f}, "
f"{mAPbbox_R40[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev_R40[j, 0, i]:.4f}, "
f"{mAPbev_R40[j, 1, i]:.4f}, "
f"{mAPbev_R40[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d_R40[j, 0, i]:.4f}, "
f"{mAP3d_R40[j, 1, i]:.4f}, "
f"{mAP3d_R40[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos_R40[j, 0, i]:.2f}, "
f"{mAPaos_R40[j, 1, i]:.2f}, "
f"{mAPaos_R40[j, 2, i]:.2f}"))
if i == 0:
ret_dict['%s_aos/easy_R40' % class_to_name[curcls]] = mAPaos_R40[j, 0, 0]
ret_dict['%s_aos/moderate_R40' % class_to_name[curcls]] = mAPaos_R40[j, 1, 0]
ret_dict['%s_aos/hard_R40' % class_to_name[curcls]] = mAPaos_R40[j, 2, 0]
if i == 0:
# ret_dict['%s_3d/easy' % class_to_name[curcls]] = mAP3d[j, 0, 0]
# ret_dict['%s_3d/moderate' % class_to_name[curcls]] = mAP3d[j, 1, 0]
# ret_dict['%s_3d/hard' % class_to_name[curcls]] = mAP3d[j, 2, 0]
# ret_dict['%s_bev/easy' % class_to_name[curcls]] = mAPbev[j, 0, 0]
# ret_dict['%s_bev/moderate' % class_to_name[curcls]] = mAPbev[j, 1, 0]
# ret_dict['%s_bev/hard' % class_to_name[curcls]] = mAPbev[j, 2, 0]
# ret_dict['%s_image/easy' % class_to_name[curcls]] = mAPbbox[j, 0, 0]
# ret_dict['%s_image/moderate' % class_to_name[curcls]] = mAPbbox[j, 1, 0]
# ret_dict['%s_image/hard' % class_to_name[curcls]] = mAPbbox[j, 2, 0]
ret_dict['%s_3d/easy_R40' % class_to_name[curcls]] = mAP3d_R40[j, 0, 0]
ret_dict['%s_3d/moderate_R40' % class_to_name[curcls]] = mAP3d_R40[j, 1, 0]
ret_dict['%s_3d/hard_R40' % class_to_name[curcls]] = mAP3d_R40[j, 2, 0]
ret_dict['%s_bev/easy_R40' % class_to_name[curcls]] = mAPbev_R40[j, 0, 0]
ret_dict['%s_bev/moderate_R40' % class_to_name[curcls]] = mAPbev_R40[j, 1, 0]
ret_dict['%s_bev/hard_R40' % class_to_name[curcls]] = mAPbev_R40[j, 2, 0]
ret_dict['%s_image/easy_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 0, 0]
ret_dict['%s_image/moderate_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 1, 0]
ret_dict['%s_image/hard_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 2, 0]
return result, ret_dict
def get_kitti_eval_result(gt_annos, dt_annos, current_classes, iou_thres=0.5, PR_detail_dict=None):
"""Get KITTI evaluation result."""
num_classes = len(current_classes)
min_overlaps = np.array([iou_thres], dtype=np.float32)
min_overlaps = np.broadcast_to(
min_overlaps,
(1, 3, num_classes)
)
result = ''
# check whether alpha is valid
mAPbev, mAP3d = do_eval_kitti(
gt_annos, dt_annos, current_classes, min_overlaps, False, PR_detail_dict=PR_detail_dict)
ret_dict = {}
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
for i in range(min_overlaps.shape[0]):
result += print_str(
(f"{curcls} "
"AP@{:.2f}, {:.2f}:".format(*min_overlaps[i, 1:, j])))
result += print_str((f"bev AP:{mAPbev[j, 0, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d[j, 0, i]:.4f}"))
mAP3d_total = np.mean(mAP3d[:, 0, 0])
mAPbev_total = np.mean(mAPbev[:, 0, 0])
result += print_str(f"bev mAP: {mAPbev_total:.4f}")
result += print_str(f"3d mAP: {mAP3d_total:.4f}")
ret_dict["bev mAP"] = mAPbev_total
ret_dict["3d mAP"] = mAP3d_total
return result, ret_dict
def get_coco_eval_result(gt_annos, dt_annos, current_classes):
"""Get COCO evaluation result."""
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
}
class_to_range = {
0: [0.5, 0.95, 10],
1: [0.25, 0.7, 10],
2: [0.25, 0.7, 10],
3: [0.5, 0.95, 10],
4: [0.25, 0.7, 10],
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
overlap_ranges = np.zeros([3, 3, len(current_classes)])
for i, curcls in enumerate(current_classes):
overlap_ranges[:, :, i] = np.array(
class_to_range[curcls])[:, np.newaxis]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
result += print_str((f"{class_to_name[curcls]} "
"coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
f"{mAPbbox[j, 1]:.2f}, "
f"{mAPbbox[j, 2]:.2f}"))
result += print_str((f"bev AP:{mAPbev[j, 0]:.2f}, "
f"{mAPbev[j, 1]:.2f}, "
f"{mAPbev[j, 2]:.2f}"))
result += print_str((f"3d AP:{mAP3d[j, 0]:.2f}, "
f"{mAP3d[j, 1]:.2f}, "
f"{mAP3d[j, 2]:.2f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos[j, 0]:.2f}, "
f"{mAPaos[j, 1]:.2f}, "
f"{mAPaos[j, 2]:.2f}"))
return result
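# Illustrative usage sketch (not part of the original module). It assumes a sibling
# `kitti_common` module providing get_label_annos(), as referenced in the docstrings
# above; the directory paths are hypothetical.
#   import kitti_common as kitti  # assumed sibling module
#   gt_annos = kitti.get_label_annos("/data/kitti/training/label_2")
#   dt_annos = kitti.get_label_annos("/results/detections")
#   result_str, result_dict = get_official_eval_result(gt_annos, dt_annos, ["Car", "Pedestrian", "Cyclist"])
#   print(result_str)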
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/datasets/kitti/kitti_object_eval_python/eval.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calibration of KITTI dataset."""
import numpy as np
def get_calib_from_file(calib_file):
"""Get calibration from file."""
with open(calib_file) as f:
lines = f.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
class Calibration(object):
"""Calibration class."""
def __init__(self, calib_file):
"""Initialize."""
if not isinstance(calib_file, dict):
calib = get_calib_from_file(calib_file)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def rect_to_lidar(self, pts_rect):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)
R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)
R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
R0_ext[3, 3] = 1
V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)
V2C_ext[3, 3] = 1
pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
return pts_lidar[:, 0:3]
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
# pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
return pts_rect
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
return pts_rect
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner
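# Minimal self-check sketch (not part of the original module): exercises the
# lidar -> rect -> image chain with synthetic calibration values, since the
# Calibration class accepts a plain dict as well as a calib file path.
if __name__ == "__main__":
    dummy_calib = {
        'P2': np.array([[700., 0., 600., 0.], [0., 700., 180., 0.], [0., 0., 1., 0.]], dtype=np.float32),
        'P3': np.zeros((3, 4), dtype=np.float32),
        'R0': np.eye(3, dtype=np.float32),
        'Tr_velo2cam': np.array([[0., -1., 0., 0.], [0., 0., -1., 0.], [1., 0., 0., 0.]], dtype=np.float32),
    }
    calib = Calibration(dummy_calib)
    pts_lidar = np.array([[10., 1., -1.]], dtype=np.float32)  # one synthetic point 10 m ahead of the sensor
    pts_img, pts_depth = calib.lidar_to_img(pts_lidar)
    print("image coords:", pts_img, "depth:", pts_depth)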
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/calibration_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D object utils."""
import numpy as np
def get_objects_from_label(label_file):
"""Get objects from label."""
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
class Object3d(object):
"""Object3d class."""
def __init__(self, line):
"""Initialize."""
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10]) # noqa: E741
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_lidar = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.loc
return corners3d
def to_str(self):
"""Convert to string."""
label_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2],
self.ry)
return label_str
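# Minimal usage sketch (not part of the original module): parses one synthetic
# KITTI-format label line and prints the derived 3D corners in camera coordinates.
if __name__ == "__main__":
    sample_line = "Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59"
    obj = Object3d(sample_line)
    print(obj.cls_type, obj.loc, obj.ry)
    print(obj.generate_corners3d())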
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/object3d_general.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for coordinate transforms."""
import math
import torch
try:
from kornia.geometry.conversions import (
convert_points_to_homogeneous,
convert_points_from_homogeneous,
)
except: # noqa: E722
pass
def project_to_image(project, points):
"""
Project points to image
Args:
project [torch.tensor(..., 3, 4)]: Projection matrix
points [torch.Tensor(..., 3)]: 3D points
Returns:
points_img [torch.Tensor(..., 2)]: Points in image
points_depth [torch.Tensor(...)]: Depth of each point
"""
# Reshape tensors to expected shape
points = convert_points_to_homogeneous(points)
points = points.unsqueeze(dim=-1)
project = project.unsqueeze(dim=1)
# Transform points to image and get depths
points_t = project @ points
points_t = points_t.squeeze(dim=-1)
points_img = convert_points_from_homogeneous(points_t)
points_depth = points_t[..., -1] - project[..., 2, 3]
return points_img, points_depth
def normalize_coords(coords, shape):
"""
Normalize coordinates of a grid between [-1, 1]
Args:
coords: (..., 3), Coordinates in grid
shape: (3), Grid shape
Returns:
norm_coords: (.., 3), Normalized coordinates in grid
"""
min_n = -1
max_n = 1
shape = torch.flip(shape, dims=[0]) # Reverse ordering of shape
# Subtract 1 since pixel indexing from [0, shape - 1]
norm_coords = coords / (shape - 1) * (max_n - min_n) + min_n
return norm_coords
def bin_depths(depth_map, mode, depth_min, depth_max, num_bins, target=False):
"""
Converts depth map into bin indices
Args:
depth_map: (H, W), Depth Map
        mode: string, Discretization mode (See https://arxiv.org/pdf/2005.13423.pdf for more details)
            UD: Uniform discretization
            LID: Linear increasing discretization
            SID: Spacing increasing discretization
depth_min: float, Minimum depth value
depth_max: float, Maximum depth value
num_bins: int, Number of depth bins
target: bool, Whether the depth bins indices will be used for a target tensor in loss comparison
Returns:
indices: (H, W), Depth bin indices
"""
if mode == "UD":
bin_size = (depth_max - depth_min) / num_bins
indices = ((depth_map - depth_min) / bin_size)
elif mode == "LID":
bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins))
indices = -0.5 + 0.5 * torch.sqrt(1 + 8 * (depth_map - depth_min) / bin_size)
elif mode == "SID":
indices = num_bins * (torch.log(1 + depth_map) - math.log(1 + depth_min)) / \
(math.log(1 + depth_max) - math.log(1 + depth_min))
else:
raise NotImplementedError
if target:
        # Remove indices outside of bounds
mask = (indices < 0) | (indices > num_bins) | (~torch.isfinite(indices))
indices[mask] = num_bins
# Convert to integer
indices = indices.type(torch.int64)
return indices
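# Minimal self-check sketch (not part of the original module): compares the three
# depth discretization modes on a tiny synthetic depth map; the depth range and
# bin count below are arbitrary example values.
if __name__ == "__main__":
    example_depths = torch.tensor([[2.0, 10.0, 45.0]])
    for example_mode in ("UD", "LID", "SID"):
        bins = bin_depths(example_depths, example_mode, depth_min=2.0, depth_max=46.8, num_bins=80, target=True)
        print(example_mode, bins)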
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/transform_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Box encoder."""
import numpy as np
import torch
class ResidualCoder(object):
"""Residual box encoder."""
def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs):
"""Initialize."""
super().__init__()
self.code_size = code_size
self.encode_angle_by_sincos = encode_angle_by_sincos
if self.encode_angle_by_sincos:
self.code_size += 1
def encode_torch(self, boxes, anchors):
"""
Args:
boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
Returns:
"""
anchors[:, 3:6] = torch.clamp_min(anchors[:, 3:6], min=1e-5)
boxes[:, 3:6] = torch.clamp_min(boxes[:, 3:6], min=1e-5)
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(boxes, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / dza
dxt = torch.log(dxg / dxa)
dyt = torch.log(dyg / dya)
dzt = torch.log(dzg / dza)
if self.encode_angle_by_sincos:
rt_cos = torch.cos(rg) - torch.cos(ra)
rt_sin = torch.sin(rg) - torch.sin(ra)
rts = [rt_cos, rt_sin]
else:
rts = [rg - ra]
cts = [g - a for g, a in zip(cgs, cas)]
return torch.cat([xt, yt, zt, dxt, dyt, dzt, *rts, *cts], dim=-1)
def decode_torch(self, box_encodings, anchors):
"""
Args:
box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...]
anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
if not self.encode_angle_by_sincos:
xt, yt, zt, dxt, dyt, dzt, rt, *cts = torch.split(box_encodings, 1, dim=-1)
else:
xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(dxt) * dxa
dyg = torch.exp(dyt) * dya
dzg = torch.exp(dzt) * dza
if self.encode_angle_by_sincos:
rg_cos = cost + torch.cos(ra)
rg_sin = sint + torch.sin(ra)
rg = torch.atan2(rg_sin, rg_cos)
else:
rg = rt + ra
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
class PreviousResidualDecoder(object):
"""Previous Residual Decoder."""
def __init__(self, code_size=7, **kwargs):
"""Initialize."""
super().__init__()
self.code_size = code_size
@staticmethod
def decode_torch(box_encodings, anchors):
"""
Args:
box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(lt) * dxa
dyg = torch.exp(wt) * dya
dzg = torch.exp(ht) * dza
rg = rt + ra
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
class PreviousResidualRoIDecoder(object):
"""Previous Residual RoI Decoder."""
def __init__(self, code_size=7, **kwargs):
"""Initialize."""
super().__init__()
self.code_size = code_size
@staticmethod
def decode_torch(box_encodings, anchors):
"""
Args:
box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values
anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(lt) * dxa
dyg = torch.exp(wt) * dya
dzg = torch.exp(ht) * dza
rg = ra - rt
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
class PointResidualCoder(object):
"""Point Residual Encoder."""
def __init__(self, code_size=8, use_mean_size=True, **kwargs):
"""Initialize."""
super().__init__()
self.code_size = code_size
self.use_mean_size = use_mean_size
if self.use_mean_size:
self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).cuda().float()
assert self.mean_size.min() > 0
def encode_torch(self, gt_boxes, points, gt_classes=None):
"""
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
points: (N, 3) [x, y, z]
gt_classes: (N) [1, num_classes]
Returns:
box_coding: (N, 8 + C)
"""
gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5)
xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert gt_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[gt_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / dza
dxt = torch.log(dxg / dxa)
dyt = torch.log(dyg / dya)
dzt = torch.log(dzg / dza)
else:
xt = (xg - xa)
yt = (yg - ya)
zt = (zg - za)
dxt = torch.log(dxg)
dyt = torch.log(dyg)
dzt = torch.log(dzg)
cts = [g for g in cgs]
return torch.cat([xt, yt, zt, dxt, dyt, dzt, torch.cos(rg), torch.sin(rg), *cts], dim=-1)
def decode_torch(self, box_encodings, points, pred_classes=None):
"""
Args:
box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, cos, sin, ...]
points: [x, y, z]
pred_classes: (N) [1, num_classes]
Returns:
"""
xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1)
xa, ya, za = torch.split(points, 1, dim=-1)
if self.use_mean_size:
assert pred_classes.max() <= self.mean_size.shape[0]
point_anchor_size = self.mean_size[pred_classes - 1]
dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1)
diagonal = torch.sqrt(dxa ** 2 + dya ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * dza + za
dxg = torch.exp(dxt) * dxa
dyg = torch.exp(dyt) * dya
dzg = torch.exp(dzt) * dza
else:
xg = xt + xa
yg = yt + ya
zg = zt + za
dxg, dyg, dzg = torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1)
rg = torch.atan2(sint, cost)
cgs = [t for t in cts]
return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1)
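# Minimal round-trip sketch (not part of the original module): encodes a synthetic
# box against a synthetic anchor with ResidualCoder and checks that decoding
# recovers the original box. All values below are made up for illustration.
if __name__ == "__main__":
    coder = ResidualCoder(code_size=7)
    example_anchors = torch.tensor([[0.0, 0.0, -1.0, 3.9, 1.6, 1.56, 0.0]])
    example_boxes = torch.tensor([[0.5, -0.3, -0.8, 4.2, 1.7, 1.5, 0.3]])
    encodings = coder.encode_torch(example_boxes.clone(), example_anchors.clone())
    decoded = coder.decode_torch(encodings, example_anchors)
    print("round-trip ok:", torch.allclose(decoded, example_boxes, atol=1e-5))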
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/box_coder_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Safe Unpickler to avoild unsafe deserialization."""
import pickle
from io import BytesIO
class SafeUnpickler(pickle.Unpickler):
"""
Custom unpickler that only allows deserialization of a specified class.
"""
def __init__(self, serialized_data: bytes, class_name: str):
"""
Initialize the unpickler with the serialized data and the name of the class to allow deserialization for.
Args:
serialized_data (bytes): The serialized data to be deserialized.
class_name (string): The name of the class to be deserialized.
"""
self.class_name = class_name
super().__init__(BytesIO(serialized_data))
def find_class(self, module: str, name: str) -> type:
"""
Override the default find_class() method to only allow the specified class to be deserialized.
Args:
module (string): The module name.
name (string): The class name.
Returns:
type: The specified class.
"""
# Only allow the specified class to be deserialized
if name == self.class_name:
return globals()[name]
# Raise an exception for all other classes
raise pickle.UnpicklingError("Invalid class: %s.%s" % (module, name))
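# Minimal usage sketch (not part of the original module): plain built-in containers
# deserialize without triggering find_class, while any class that is not the
# allowed name is rejected with UnpicklingError.
if __name__ == "__main__":
    safe_payload = pickle.dumps({"scores": [0.9, 0.8]})
    print(SafeUnpickler(safe_payload, "NothingAllowed").load())
    import collections
    blocked_payload = pickle.dumps(collections.OrderedDict(a=1))
    try:
        SafeUnpickler(blocked_payload, "NothingAllowed").load()
    except pickle.UnpicklingError as err:
        print("blocked:", err)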
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/safe_unpickler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Box utils."""
import numpy as np
from scipy.spatial import Delaunay
import torch
from ..ops.roiaware_pool3d import roiaware_pool3d_utils
from . import common_utils
def in_hull(p, hull):
"""
:param p: (N, K) test points
:param hull: (M, K) M corners of a box
:return (N) bool
"""
try:
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
flag = hull.find_simplex(p) >= 0
except Exception:
print('Warning: not a hull %s' % str(hull))
        flag = np.zeros(p.shape[0], dtype=bool)
return flag
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
Returns:
"""
boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
template = boxes3d.new_tensor((
[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
[1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
)) / 2
corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
corners3d = common_utils.rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
corners3d += boxes3d[:, None, 0:3]
return corners3d.numpy() if is_numpy else corners3d
def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1):
"""
Args:
boxes: (N, 7) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
limit_range: [minx, miny, minz, maxx, maxy, maxz]
min_num_corners:
Returns:
"""
if boxes.shape[1] > 7:
boxes = boxes[:, 0:7]
corners = boxes_to_corners_3d(boxes) # (N, 8, 3)
mask = ((corners >= limit_range[0:3]) & (corners <= limit_range[3:6])).all(axis=2)
mask = mask.sum(axis=1) >= min_num_corners # (N)
return mask
def remove_points_in_boxes3d(points, boxes3d):
"""
Args:
points: (num_points, 3 + C)
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center, each box DO NOT overlaps
Returns:
"""
boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
points, is_numpy = common_utils.check_numpy_to_torch(points)
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d)
points = points[point_masks.sum(dim=0) == 0]
return points.numpy() if is_numpy else points
def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib):
"""
Args:
boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
calib:
Returns:
boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
"""
xyz_camera = boxes3d_camera[:, 0:3]
l, h, w, r = boxes3d_camera[:, 3:4], boxes3d_camera[:, 4:5], boxes3d_camera[:, 5:6], boxes3d_camera[:, 6:7]
xyz_lidar = calib.rect_to_lidar(xyz_camera)
xyz_lidar[:, 2] += h[:, 0] / 2
return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar):
"""
Args:
boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
Returns:
boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
"""
w, l, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
boxes3d_lidar[:, 2] += h[:, 0] / 2
return np.concatenate([boxes3d_lidar[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1)
def boxes3d_kitti_lidar_to_fakelidar(boxes3d_lidar):
"""
Args:
boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
Returns:
boxes3d_fakelidar: [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center
"""
dx, dy, dz, heading = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
boxes3d_lidar[:, 2] -= dz[:, 0] / 2
return np.concatenate([boxes3d_lidar[:, 0:3], dy, dx, dz, -heading - np.pi / 2], axis=-1)
def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)):
"""
Args:
boxes3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
extra_width: [extra_x, extra_y, extra_z]
Returns:
"""
boxes3d = common_utils.check_numpy_to_torch(boxes3d)[0]
large_boxes3d = boxes3d.clone()
large_boxes3d[:, 3:6] += boxes3d.new_tensor(extra_width)[None, :]
return large_boxes3d
def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib):
"""
:param boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
:param calib:
:return:
boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
"""
xyz_lidar = boxes3d_lidar[:, 0:3]
l, w, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7]
xyz_lidar[:, 2] -= h.reshape(-1) / 2
xyz_cam = calib.lidar_to_rect(xyz_lidar)
# xyz_cam[:, 1] += h.reshape(-1) / 2
r = -r - np.pi / 2
return np.concatenate([xyz_cam, l, h, w, r], axis=-1)
def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True):
"""
:param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset
:param bottom_center: whether y is on the bottom center of object
:return: corners3d: (N, 8, 3)
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
"""
boxes_num = boxes3d.shape[0]
l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5]
x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2], dtype=np.float32).T
z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T
if bottom_center:
y_corners = np.zeros((boxes_num, 8), dtype=np.float32)
y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1) # (N, 8)
else:
y_corners = np.array([h / 2., h / 2., h / 2., h / 2., -h / 2., -h / 2., -h / 2., -h / 2.], dtype=np.float32).T
ry = boxes3d[:, 6]
zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32)
rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)],
[zeros, ones, zeros],
[np.sin(ry), zeros, np.cos(ry)]]) # (3, 3, N)
R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3)
temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1),
z_corners.reshape(-1, 8, 1)), axis=2) # (N, 8, 3)
rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3)
x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2]
x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2]
x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8)
y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8)
z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8)
corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2)
return corners.astype(np.float32)
def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None):
"""
:param boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords
:param calib:
:return:
box_2d_preds: (N, 4) [x1, y1, x2, y2]
"""
corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d)
pts_img, _ = calib.rect_to_img(corners3d.reshape(-1, 3))
corners_in_image = pts_img.reshape(-1, 8, 2)
min_uv = np.min(corners_in_image, axis=1) # (N, 2)
max_uv = np.max(corners_in_image, axis=1) # (N, 2)
boxes2d_image = np.concatenate([min_uv, max_uv], axis=1)
if image_shape is not None:
boxes2d_image[:, 0] = np.clip(boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1)
boxes2d_image[:, 1] = np.clip(boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1)
boxes2d_image[:, 2] = np.clip(boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1)
boxes2d_image[:, 3] = np.clip(boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1)
return boxes2d_image
def boxes_iou_normal(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 4) [x1, y1, x2, y2]
boxes_b: (M, 4) [x1, y1, x2, y2]
Returns:
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 4
x_min = torch.max(boxes_a[:, 0, None], boxes_b[None, :, 0])
x_max = torch.min(boxes_a[:, 2, None], boxes_b[None, :, 2])
y_min = torch.max(boxes_a[:, 1, None], boxes_b[None, :, 1])
y_max = torch.min(boxes_a[:, 3, None], boxes_b[None, :, 3])
x_len = torch.clamp_min(x_max - x_min, min=0)
y_len = torch.clamp_min(y_max - y_min, min=0)
area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
a_intersect_b = x_len * y_len
iou = a_intersect_b / torch.clamp_min(area_a[:, None] + area_b[None, :] - a_intersect_b, min=1e-6)
return iou
def boxes3d_lidar_to_aligned_bev_boxes(boxes3d):
"""
Args:
boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in lidar coordinate
Returns:
aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the above lidar coordinate
"""
rot_angle = common_utils.limit_period(boxes3d[:, 6], offset=0.5, period=np.pi).abs()
choose_dims = torch.where(rot_angle[:, None] < np.pi / 4, boxes3d[:, [3, 4]], boxes3d[:, [4, 3]])
aligned_bev_boxes = torch.cat((boxes3d[:, 0:2] - choose_dims / 2, boxes3d[:, 0:2] + choose_dims / 2), dim=1)
return aligned_bev_boxes
def boxes3d_nearest_bev_iou(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading]
Returns:
"""
boxes_bev_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a)
boxes_bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b)
return boxes_iou_normal(boxes_bev_a, boxes_bev_b)
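# Minimal self-check sketch (not part of the original module): corners of one
# synthetic axis-aligned box and the BEV IoU of that box with itself. Note that
# importing this module requires the package's compiled roiaware_pool3d ops.
if __name__ == "__main__":
    demo_boxes = np.array([[0., 0., 0., 4.0, 2.0, 1.5, 0.0]], dtype=np.float32)
    corners = boxes_to_corners_3d(demo_boxes)
    print("corners shape:", corners.shape)  # (1, 8, 3)
    demo_boxes_t = torch.from_numpy(demo_boxes)
    print("self BEV IoU:", boxes3d_nearest_bev_iou(demo_boxes_t, demo_boxes_t))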
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/box_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utils."""
import logging
import os
import pickle
import random
import shutil
import subprocess
import numpy as np
import torch
import tempfile
import torch.distributed as dist
import torch.multiprocessing as mp
def check_numpy_to_torch(x):
"""Check and convert numpy array to torch tensor."""
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
return x, False
def limit_period(val, offset=0.5, period=np.pi):
"""Limit period."""
val, is_numpy = check_numpy_to_torch(val)
ans = val - torch.floor(val / period + offset) * period
return ans.numpy() if is_numpy else ans
def drop_info_with_name(info, name, exclude=None):
"""Drop info with name."""
ret_info = {}
keep_indices = [i for i, x in enumerate(info['name']) if x != name]
exclude_list = exclude or []
for key in info.keys():
if key not in exclude_list:
ret_info[key] = info[key][keep_indices]
else:
ret_info[key] = info[key]
return ret_info
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), angle along z-axis, angle increases x ==> y
Returns:
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
def mask_points_by_range(points, limit_range):
"""Mask points by range."""
mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \
& (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])
return mask
def get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):
"""
Args:
voxel_coords: (N, 3)
downsample_times:
voxel_size:
point_cloud_range:
Returns:
"""
assert voxel_coords.shape[1] == 3
voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz)
voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times
pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float()
voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range
return voxel_centers
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
"""Create logger."""
logger = logging.getLogger(__name__)
logger.setLevel(log_level if rank == 0 else 'ERROR')
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(message)s')
if log_file is not None:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setLevel(log_level if rank == 0 else 'ERROR')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def set_random_seed(seed):
"""Set random seed."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def keep_arrays_by_name(gt_names, used_classes):
"""Keep arrays by name."""
inds = [i for i, x in enumerate(gt_names) if x in used_classes]
inds = np.array(inds, dtype=np.int64)
return inds
def init_dist_slurm(tcp_port, local_rank, backend='nccl'):
"""
modified from https://github.com/open-mmlab/mmdetection
Args:
tcp_port:
backend:
Returns:
"""
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(tcp_port)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend=backend)
total_gpus = dist.get_world_size()
rank = dist.get_rank()
return total_gpus, rank
def init_dist_pytorch(local_rank, backend='nccl'):
"""Initialize distributed PyTorch."""
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(local_rank % num_gpus)
dist.init_process_group(
backend=backend
)
rank = dist.get_rank()
return num_gpus, rank
def get_dist_info():
"""Get distributed PyTorch version."""
if torch.__version__ < '1.0':
initialized = dist._initialized
else:
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def merge_results_dist(result_part, size):
"""Merge results for distributed Pytorch."""
rank, world_size = get_dist_info()
# os.makedirs(tmpdir, exist_ok=True)
tmpdir = tempfile.mkdtemp(dir='/tmp')
dist.barrier()
pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb'))
dist.barrier()
if rank != 0:
return None
part_list = []
for i in range(world_size):
part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i))
part_list.append(pickle.load(open(part_file, 'rb')))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:size]
shutil.rmtree(tmpdir)
return ordered_results
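# Minimal self-check sketch (not part of the original module): rotates one synthetic
# point 90 degrees about the z-axis and wraps an angle into [-pi, pi) with limit_period.
if __name__ == "__main__":
    demo_points = torch.tensor([[[1.0, 0.0, 0.0]]])  # (B=1, N=1, 3)
    print(rotate_points_along_z(demo_points, torch.tensor([np.pi / 2])))  # ~[[[0., 1., 0.]]]
    print(limit_period(np.array([3.5 * np.pi]), offset=0.5, period=2 * np.pi))  # ~[-0.5 * pi]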
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss function utils."""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import box_utils
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(inp: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
inp: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(inp, min=0) - inp * target + torch.log1p(torch.exp(-torch.abs(inp)))
return loss
def forward(self, inp: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
inp: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(inp)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(inp, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            self.code_weights = None
@staticmethod
def smooth_l1_loss(diff, beta):
"""Smooth L1 loss."""
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, inp: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
inp: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), inp, target) # ignore nan targets
diff = inp - target
        # code-wise weighting (only applied when code weights were configured)
        if self.code_weights is not None:
            if self.code_weights.device != diff.device:
                self.code_weights = self.code_weights.to(diff.device)
            diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
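# Example usage (illustrative sketch; the 7 unit code weights below are placeholders
# for the usual [x, y, z, dx, dy, dz, ry] box encoding, not values from a config, and
# the constructor moves code_weights onto the GPU, so this sketch assumes CUDA):
#
#   reg_loss = WeightedSmoothL1Loss(beta=1.0 / 9.0, code_weights=[1.0] * 7)
#   preds = torch.randn(2, 100, 7).cuda()     # (B, #anchors, #codes)
#   targets = torch.randn(2, 100, 7).cuda()   # regression targets
#   weights = torch.ones(2, 100).cuda()       # anchor-wise weights
#   loss = reg_loss(preds, targets, weights)  # per-code loss, reduce outside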
class WeightedL1Loss(nn.Module):
"""Weighted L1 loss."""
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
        if code_weights is not None:
            self.code_weights = np.array(code_weights, dtype=np.float32)
            self.code_weights = torch.from_numpy(self.code_weights).cuda()
        else:
            self.code_weights = None
def forward(self, inp: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
inp: (B, #anchors, #codes) float tensor.
                Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
            loss: (B, #anchors, #codes) float tensor.
                Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), inp, target) # ignore nan targets
diff = inp - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the PyTorch official cross entropy loss
with anchor-wise weighting.
"""
def forward(self, inp: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
inp: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
inp = inp.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(inp, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
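if __name__ == "__main__":
    # Minimal CPU smoke test of the reduction-free losses above (illustrative only;
    # real shapes and weights come from the dense head, not from this block).
    B, A, C = 2, 8, 3
    logits = torch.randn(B, A, C)
    one_hot = torch.zeros(B, A, C)
    one_hot[..., 0] = 1.0
    anchor_weights = torch.ones(B, A)
    focal = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
    print("focal loss:", focal(logits, one_hot, anchor_weights).sum().item())
    ce = WeightedCrossEntropyLoss()
    print("weighted cross entropy:", ce(logits, one_hot, anchor_weights).sum().item())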
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/loss_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D object KITTI utils."""
import numpy as np
def get_objects_from_label(label_file):
"""Get objects from label."""
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
def cls_type_to_id(cls_type):
"""Convert class type to ID."""
type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
if cls_type not in type_to_id.keys():
return -1
return type_to_id[cls_type]
class Object3d(object):
"""Object3d class."""
def __init__(self, line):
"""Initialize."""
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.cls_id = cls_type_to_id(self.cls_type)
self.truncation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10]) # noqa: E741
self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.loc)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
self.level_str = None
self.level = self.get_kitti_obj_level()
def get_kitti_obj_level(self):
"""Get KITTI object difficult level."""
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 0 # Easy
if height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 1 # Moderate
if height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 2 # Hard
self.level_str = 'UnKnown'
return -1
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.loc
return corners3d
def to_str(self):
"""Convert to string."""
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,
self.loc, self.ry)
return print_str
def to_kitti_format(self):
"""Convert to KITTI format."""
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2],
self.ry)
return kitti_str
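# Example (illustrative sketch; the label line below is a made-up sample in the
# standard 15-column KITTI format, not taken from any dataset file):
#
#   line = "Car 0.00 0 -1.58 587.0 173.3 614.1 200.1 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59"
#   obj = Object3d(line)
#   obj.cls_id                        # 1, from cls_type_to_id('Car')
#   obj.level_str                     # difficulty bucket set by get_kitti_obj_level()
#   obj.generate_corners3d().shape    # (8, 3) box corners in the camera frame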
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/utils/object3d_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars models."""
from collections import namedtuple
import os
import tempfile
import numpy as np
import torch
from .detectors import build_detector
from eff.core.codec import decrypt_stream
def decrypt_pytorch(input_file_name, output_file_name, key):
"""Decrypt the TLT model to Pytorch model"""
with open(input_file_name, "rb") as open_temp_file, open(output_file_name, "wb") as open_encoded_file:
decrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def load_checkpoint(model_path, key, to_cpu=False):
"""Helper function to load a saved checkpoint."""
loc_type = torch.device('cpu') if to_cpu else None
if model_path.endswith(".tlt"):
handle, temp_name = tempfile.mkstemp(".pth")
os.close(handle)
decrypt_pytorch(model_path, temp_name, key)
loaded_model = torch.load(temp_name, map_location=loc_type)
os.remove(temp_name)
else:
loaded_model = torch.load(model_path, map_location=loc_type)
epoch = it = 0
opt_state = None
# It can be a dict or a bare-metal model
if isinstance(loaded_model, dict):
if "model" in loaded_model and loaded_model["model"] is not None:
model = loaded_model["model"]
else:
raise KeyError(f"Key `model` not found in model loaded from {model_path}")
# Load optimizer states
if (
"optimizer_state" in loaded_model and
loaded_model["optimizer_state"] is not None
):
opt_state = loaded_model["optimizer_state"]
epoch = loaded_model.get('epoch', 0)
it = loaded_model.get('it', 0.0)
else:
model = loaded_model
# It can be a DDP wrapper
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model = model.module
return model, opt_state, epoch, it
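# Example usage (a sketch; the path, key and device flag below are placeholders,
# not values from any config in this repo):
#
#   model, opt_state, epoch, it = load_checkpoint(
#       "/workspace/checkpoints/pointpillars_epoch_10.tlt",  # or a plain .pth file
#       key="nvidia_tao",
#       to_cpu=not torch.cuda.is_available()
#   )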
def build_network(model_cfg, num_class, dataset):
"""Build the network."""
model = build_detector(
model_cfg=model_cfg, num_class=num_class, dataset=dataset
)
return model
def build_model_and_optimizer(
model_cfg, num_class, dataset,
pruned_model_path, resume_training_checkpoint_path,
pretrained_model_path, to_cpu, logger,
key
):
"""Build model and optimizer."""
epoch = 0
it = 0.
opt_state = None
if resume_training_checkpoint_path is not None:
# Case 1: resume an interrupted training
model, opt_state, epoch, it = load_checkpoint(
resume_training_checkpoint_path, key, to_cpu
)
logger.info(f"Model resumed from: {resume_training_checkpoint_path}")
elif pruned_model_path is not None:
# Case 2: retrain a possibly pruned model
# No optimizer states in pruned model
model = load_checkpoint(pruned_model_path, key, to_cpu)[0]
logger.info(f"Pruned model loaded from: {pruned_model_path}")
else:
# Case 3: Build a new model from scratch
model = build_network(model_cfg, num_class, dataset)
# Case 4: Using pretrained weights
if pretrained_model_path is not None:
pretrained_model = load_checkpoint(pretrained_model_path, key, to_cpu)[0]
loaded_state_dict = pretrained_model.state_dict()
current_model_dict = model.state_dict()
new_state_dict = dict()
for k in current_model_dict:
if (
k in loaded_state_dict and
loaded_state_dict[k].size() == current_model_dict[k].size()
):
new_state_dict.update({k: loaded_state_dict[k]})
model.load_state_dict(new_state_dict, strict=False)
new_model_dict = model.state_dict()
loaded_layers = []
unloaded_layers = []
for n in new_model_dict:
if n in new_state_dict:
loaded_layers.append(n)
else:
unloaded_layers.append(n)
logger.info("Layers initialized from pretrained model:")
logger.info("=" * 30)
for m in loaded_layers:
logger.info(m)
logger.info("=" * 30)
logger.info("Layers initialized randomly:")
logger.info("=" * 30)
for m in unloaded_layers:
logger.info(m)
logger.info("=" * 30)
logger.info(f"Pretrained weights loaded from: {pretrained_model_path}")
return model, opt_state, epoch, it
def load_data_to_gpu(batch_dict):
"""Load data to GPU."""
for key, val in batch_dict.items():
if not isinstance(val, np.ndarray):
continue
if key in ['frame_id', 'metadata', 'calib', 'image_shape']:
continue
batch_dict[key] = torch.from_numpy(val).float().cuda()
def model_fn_decorator():
"""Model function decorator for training."""
ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])
def model_func(model, batch_dict):
"""Custom model function."""
load_data_to_gpu(batch_dict)
ret_dict, tb_dict, disp_dict = model(batch_dict)
loss = ret_dict['loss'].mean()
if hasattr(model, 'update_global_step'):
model.update_global_step()
else:
model.module.update_global_step()
return ModelReturn(loss, tb_dict, disp_dict)
return model_func
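# Example usage inside a training step (a sketch; `model`, `batch_dict` and the
# optimizer are assumed to be provided by the caller, they are not defined here):
#
#   model_func = model_fn_decorator()
#   loss, tb_dict, disp_dict = model_func(model, batch_dict)
#   optimizer.zero_grad()
#   loss.backward()
#   optimizer.step()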
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Anchor Head Template."""
import numpy as np
import torch
import torch.nn as nn
from ...utils import box_coder_utils, common_utils, loss_utils
from .target_assigner.anchor_generator import AnchorGenerator
from .target_assigner.axis_aligned_target_assigner import AxisAlignedTargetAssigner
class AnchorHeadTemplate(nn.Module):
"""AnchorHeadTemplate class."""
def __init__(self, model_cfg, num_class, class_names, grid_size, point_cloud_range, predict_boxes_when_training):
"""Initialize."""
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.class_names = class_names
self.predict_boxes_when_training = predict_boxes_when_training
self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False)
anchor_target_cfg = self.model_cfg.target_assigner_config
self.box_coder = getattr(box_coder_utils, anchor_target_cfg.box_coder)(
num_dir_bins=anchor_target_cfg.get('num_dir_bins', 6),
**anchor_target_cfg.get('BOX_CODER_CONFIG', {})
)
anchor_generator_cfg = self.model_cfg.anchor_generator_config
anchors, self.num_anchors_per_location = self.generate_anchors(
anchor_generator_cfg, grid_size=grid_size, point_cloud_range=point_cloud_range,
anchor_ndim=self.box_coder.code_size
)
self.anchors = [x.cuda() for x in anchors]
self.target_assigner = self.get_target_assigner(anchor_target_cfg)
self.forward_ret_dict = {}
self.build_losses(self.model_cfg.loss_config)
@staticmethod
def generate_anchors(anchor_generator_cfg, grid_size, point_cloud_range, anchor_ndim=7):
"""Generate anchors."""
anchor_generator = AnchorGenerator(
anchor_range=point_cloud_range,
anchor_generator_config=anchor_generator_cfg
)
feature_map_size = [grid_size[:2] // config['feature_map_stride'] for config in anchor_generator_cfg]
anchors_list, num_anchors_per_location_list = anchor_generator.generate_anchors(feature_map_size)
if anchor_ndim != 7:
for idx, anchors in enumerate(anchors_list):
pad_zeros = anchors.new_zeros([*anchors.shape[0:-1], anchor_ndim - 7])
new_anchors = torch.cat((anchors, pad_zeros), dim=-1)
anchors_list[idx] = new_anchors
return anchors_list, num_anchors_per_location_list
def get_target_assigner(self, anchor_target_cfg):
"""Get target assigner."""
if anchor_target_cfg.name == 'AxisAlignedTargetAssigner':
target_assigner = AxisAlignedTargetAssigner(
model_cfg=self.model_cfg,
class_names=self.class_names,
box_coder=self.box_coder,
match_height=anchor_target_cfg.match_height
)
else:
raise NotImplementedError
return target_assigner
def build_losses(self, losses_cfg):
"""Build losses."""
self.add_module(
'cls_loss_func',
loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0)
)
reg_loss_name = 'WeightedSmoothL1Loss' if losses_cfg.get('REG_LOSS_TYPE', None) is None \
else losses_cfg.REG_LOSS_TYPE
self.add_module(
'reg_loss_func',
getattr(loss_utils, reg_loss_name)(code_weights=losses_cfg.loss_weights['code_weights'])
)
self.add_module(
'dir_loss_func',
loss_utils.WeightedCrossEntropyLoss()
)
def assign_targets(self, gt_boxes):
"""
Args:
gt_boxes: (B, M, 8)
Returns:
"""
if self.anchors[0].device != gt_boxes.device:
self.anchors = [a.to(gt_boxes.device) for a in self.anchors]
targets_dict = self.target_assigner.assign_targets(
self.anchors, gt_boxes
)
return targets_dict
def get_cls_layer_loss(self):
"""Get class layer's loss."""
cls_preds = self.forward_ret_dict['cls_preds']
box_cls_labels = self.forward_ret_dict['box_cls_labels']
batch_size = int(cls_preds.shape[0])
cared = box_cls_labels >= 0 # [N, num_anchors]
positives = box_cls_labels > 0
negatives = box_cls_labels == 0
negative_cls_weights = negatives * 1.0
cls_weights = (negative_cls_weights + 1.0 * positives).float()
reg_weights = positives.float()
if self.num_class == 1:
# class agnostic
box_cls_labels[positives] = 1
pos_normalizer = positives.sum(1, keepdim=True).float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
cls_weights /= torch.clamp(pos_normalizer, min=1.0)
cls_targets = box_cls_labels * cared.type_as(box_cls_labels)
cls_targets = cls_targets.unsqueeze(dim=-1)
cls_targets = cls_targets.squeeze(dim=-1)
one_hot_targets = torch.zeros(
*list(cls_targets.shape), self.num_class + 1, dtype=cls_preds.dtype, device=cls_targets.device
)
one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0)
cls_preds = cls_preds.view(batch_size, -1, self.num_class)
one_hot_targets = one_hot_targets[..., 1:]
cls_loss_src = self.cls_loss_func(cls_preds, one_hot_targets, weights=cls_weights) # [N, M]
cls_loss = cls_loss_src.sum() / batch_size
cls_loss = cls_loss * self.model_cfg.loss_config.loss_weights['cls_weight']
tb_dict = {
'rpn_loss_cls': cls_loss.item()
}
return cls_loss, tb_dict
@staticmethod
def add_sin_difference(boxes1, boxes2, dim=6):
"""Add Sin difference."""
assert dim != -1
rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * torch.cos(boxes2[..., dim:dim + 1])
rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * torch.sin(boxes2[..., dim:dim + 1])
boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, boxes1[..., dim + 1:]], dim=-1)
boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, boxes2[..., dim + 1:]], dim=-1)
return boxes1, boxes2
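    # Illustration of the encoding above (a sketch, not extra functionality): only the
    # rotation channel (dim=6) is replaced, so for predicted angle a and target angle b
    #   boxes1[..., 6] -> sin(a) * cos(b)
    #   boxes2[..., 6] -> cos(a) * sin(b)
    # and an element-wise Smooth-L1 on this channel regresses sin(a - b), which avoids
    # the 2*pi discontinuity of regressing the raw angle difference directly.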
@staticmethod
def get_direction_target(anchors, reg_targets, one_hot=True, dir_offset=0, num_bins=2):
"""Get direction target."""
batch_size = reg_targets.shape[0]
anchors = anchors.view(batch_size, -1, anchors.shape[-1])
rot_gt = reg_targets[..., 6] + anchors[..., 6]
offset_rot = common_utils.limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()
dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)
if one_hot:
dir_targets = torch.zeros(*list(dir_cls_targets.shape), num_bins, dtype=anchors.dtype,
device=dir_cls_targets.device)
dir_targets.scatter_(-1, dir_cls_targets.unsqueeze(dim=-1).long(), 1.0)
dir_cls_targets = dir_targets
return dir_cls_targets
def get_box_reg_layer_loss(self):
"""Get box regression layer's loss."""
box_preds = self.forward_ret_dict['box_preds']
box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None)
box_reg_targets = self.forward_ret_dict['box_reg_targets']
box_cls_labels = self.forward_ret_dict['box_cls_labels']
batch_size = int(box_preds.shape[0])
positives = box_cls_labels > 0
reg_weights = positives.float()
pos_normalizer = positives.sum(1, keepdim=True).float()
reg_weights /= torch.clamp(pos_normalizer, min=1.0)
if isinstance(self.anchors, list):
if self.use_multihead:
anchors = torch.cat(
[anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1]) for anchor in
self.anchors], dim=0)
else:
anchors = torch.cat(self.anchors, dim=-3)
else:
anchors = self.anchors
anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
box_preds = box_preds.view(batch_size, -1,
box_preds.shape[-1] // self.num_anchors_per_location if not self.use_multihead else
box_preds.shape[-1])
# sin(a - b) = sinacosb-cosasinb
box_preds_sin, reg_targets_sin = self.add_sin_difference(box_preds, box_reg_targets)
loc_loss_src = self.reg_loss_func(box_preds_sin, reg_targets_sin, weights=reg_weights) # [N, M]
loc_loss = loc_loss_src.sum() / batch_size
loc_loss = loc_loss * self.model_cfg.loss_config.loss_weights['loc_weight']
box_loss = loc_loss
tb_dict = {
'rpn_loss_loc': loc_loss.item()
}
if box_dir_cls_preds is not None:
dir_targets = self.get_direction_target(
anchors, box_reg_targets,
dir_offset=self.model_cfg.dir_offset,
num_bins=self.model_cfg.num_dir_bins
)
dir_logits = box_dir_cls_preds.view(batch_size, -1, self.model_cfg.num_dir_bins)
weights = positives.type_as(dir_logits)
weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0)
dir_loss = self.dir_loss_func(dir_logits, dir_targets, weights=weights)
dir_loss = dir_loss.sum() / batch_size
dir_loss = dir_loss * self.model_cfg.loss_config.loss_weights['dir_weight']
box_loss += dir_loss
tb_dict['rpn_loss_dir'] = dir_loss.item()
return box_loss, tb_dict
def get_loss(self):
"""Get total loss."""
cls_loss, tb_dict = self.get_cls_layer_loss()
box_loss, tb_dict_box = self.get_box_reg_layer_loss()
tb_dict.update(tb_dict_box)
rpn_loss = cls_loss + box_loss
tb_dict['rpn_loss'] = rpn_loss.item()
return rpn_loss, tb_dict
def generate_predicted_boxes(self, batch_size, cls_preds, box_preds, dir_cls_preds=None):
"""
Args:
batch_size:
cls_preds: (N, H, W, C1)
box_preds: (N, H, W, C2)
dir_cls_preds: (N, H, W, C3)
Returns:
batch_cls_preds: (B, num_boxes, num_classes)
batch_box_preds: (B, num_boxes, 7+C)
"""
if isinstance(self.anchors, list):
if self.use_multihead:
anchors = torch.cat([anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1])
for anchor in self.anchors], dim=0)
else:
anchors = torch.cat(self.anchors, dim=-3)
else:
anchors = self.anchors
num_anchors = anchors.view(-1, anchors.shape[-1]).shape[0]
batch_anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1)
batch_cls_preds = cls_preds.view(batch_size, num_anchors, -1).float() \
if not isinstance(cls_preds, list) else cls_preds
batch_box_preds = box_preds.view(batch_size, num_anchors, -1) if not isinstance(box_preds, list) \
else torch.cat(box_preds, dim=1).view(batch_size, num_anchors, -1)
batch_box_preds = self.box_coder.decode_torch(batch_box_preds, batch_anchors)
if dir_cls_preds is not None:
dir_offset = self.model_cfg.dir_offset
dir_limit_offset = self.model_cfg.dir_limit_offset
dir_cls_preds = dir_cls_preds.view(batch_size, num_anchors, -1) if not isinstance(dir_cls_preds, list) \
else torch.cat(dir_cls_preds, dim=1).view(batch_size, num_anchors, -1)
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
period = (2 * np.pi / self.model_cfg.num_dir_bins)
dir_rot = common_utils.limit_period(
batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
)
batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
if isinstance(self.box_coder, box_coder_utils.PreviousResidualDecoder):
batch_box_preds[..., 6] = common_utils.limit_period(
-(batch_box_preds[..., 6] + np.pi / 2), offset=0.5, period=np.pi * 2
)
return batch_cls_preds, batch_box_preds
def forward(self, **kwargs):
"""Forward method."""
raise NotImplementedError
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/dense_heads/anchor_head_template.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dense Heads."""
from .anchor_head_single import AnchorHeadSingle
from .anchor_head_template import AnchorHeadTemplate
__all__ = {
'AnchorHeadTemplate': AnchorHeadTemplate,
'AnchorHeadSingle': AnchorHeadSingle
}
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/dense_heads/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Anchor Head Single module."""
import numpy as np
import torch.nn as nn
from .anchor_head_template import AnchorHeadTemplate
class AnchorHeadSingle(AnchorHeadTemplate):
"""AnchorHeadSingle class."""
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
predict_boxes_when_training=True):
"""Initialize."""
super().__init__(
model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training
)
self.num_anchors_per_location = sum(self.num_anchors_per_location)
self.conv_cls = nn.Conv2d(
input_channels, self.num_anchors_per_location * self.num_class,
kernel_size=1
)
self.conv_box = nn.Conv2d(
input_channels, self.num_anchors_per_location * self.box_coder.code_size,
kernel_size=1
)
if self.model_cfg.get('use_direction_classifier', None) is not None:
self.conv_dir_cls = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.model_cfg.num_dir_bins,
kernel_size=1
)
else:
self.conv_dir_cls = None
self.init_weights()
def init_weights(self):
"""Initialize weights."""
pi = 0.01
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
def forward(self, data_dict):
"""Forward method."""
spatial_features_2d = data_dict['spatial_features_2d']
cls_preds = self.conv_cls(spatial_features_2d)
box_preds = self.conv_box(spatial_features_2d)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
if self.training:
self.forward_ret_dict['cls_preds'] = cls_preds
self.forward_ret_dict['box_preds'] = box_preds
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
if self.training:
self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
targets_dict = self.assign_targets(
gt_boxes=data_dict['gt_boxes']
)
self.forward_ret_dict.update(targets_dict)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict['batch_size'],
cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
data_dict['cls_preds_normalized'] = False
return data_dict
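# Shape sketch for this head (illustrative values only; the 384 input channels and the
# anchor layout are assumptions for the example, the real numbers come from the spec):
#
#   spatial_features_2d: (N, 384, H, W)
#   cls_preds:  (N, H, W, num_anchors_per_location * num_class)
#   box_preds:  (N, H, W, num_anchors_per_location * box_coder.code_size)
#   dir_cls_preds (optional): (N, H, W, num_anchors_per_location * num_dir_bins)
#   batch_box_preds (eval): (N, num_anchors, 7+C) decoded by generate_predicted_boxes()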
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/dense_heads/anchor_head_single.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Target assigner."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/dense_heads/target_assigner/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Axis aligned target assigner."""
import numpy as np
import torch
from ....ops.iou3d_nms import iou3d_nms_utils
from ....utils import box_utils
class AxisAlignedTargetAssigner(object):
"""AxisAlignedTargetAssigner class."""
def __init__(self, model_cfg, class_names, box_coder, match_height=False):
"""Initialize."""
super().__init__()
anchor_generator_cfg = model_cfg.anchor_generator_config
anchor_target_cfg = model_cfg.target_assigner_config
self.box_coder = box_coder
self.match_height = match_height
self.class_names = np.array(class_names)
self.anchor_class_names = [config['class_name'] for config in anchor_generator_cfg]
self.pos_fraction = anchor_target_cfg.pos_fraction if anchor_target_cfg.pos_fraction >= 0 else None
self.sample_size = anchor_target_cfg.sample_size
self.norm_by_num_examples = anchor_target_cfg.norm_by_num_examples
self.matched_thresholds = {}
self.unmatched_thresholds = {}
for config in anchor_generator_cfg:
self.matched_thresholds[config['class_name']] = config['matched_threshold']
self.unmatched_thresholds[config['class_name']] = config['unmatched_threshold']
self.use_multihead = model_cfg.get('USE_MULTIHEAD', False)
self.seperate_multihead = model_cfg.get('SEPERATE_MULTIHEAD', False)
if self.seperate_multihead:
rpn_head_cfgs = model_cfg.RPN_HEAD_CFGS
self.gt_remapping = {}
for rpn_head_cfg in rpn_head_cfgs:
for idx, name in enumerate(rpn_head_cfg['HEAD_CLS_NAME']):
self.gt_remapping[name] = idx + 1
def assign_targets(self, all_anchors, gt_boxes_with_classes):
"""
Args:
all_anchors: [(N, 7), ...]
gt_boxes: (B, M, 8)
Returns:
"""
bbox_targets = []
cls_labels = []
reg_weights = []
batch_size = gt_boxes_with_classes.shape[0]
gt_classes = gt_boxes_with_classes[:, :, -1]
gt_boxes = gt_boxes_with_classes[:, :, :-1]
for k in range(batch_size):
cur_gt = gt_boxes[k]
cnt = cur_gt.__len__() - 1
while cnt > 0 and cur_gt[cnt].sum() == 0:
cnt -= 1
cur_gt = cur_gt[:cnt + 1]
cur_gt_classes = gt_classes[k][:cnt + 1].int()
target_list = []
for anchor_class_name, anchors in zip(self.anchor_class_names, all_anchors):
if cur_gt_classes.shape[0] > 1:
mask = torch.from_numpy(self.class_names[cur_gt_classes.cpu() - 1] == anchor_class_name)
else:
mask = torch.tensor([self.class_names[c - 1] == anchor_class_name
for c in cur_gt_classes], dtype=torch.bool)
if self.use_multihead:
anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1])
if self.seperate_multihead:
selected_classes = cur_gt_classes[mask].clone()
if len(selected_classes) > 0:
new_cls_id = self.gt_remapping[anchor_class_name]
selected_classes[:] = new_cls_id
else:
selected_classes = cur_gt_classes[mask]
else:
feature_map_size = anchors.shape[:3]
anchors = anchors.view(-1, anchors.shape[-1])
selected_classes = cur_gt_classes[mask]
single_target = self.assign_targets_single(
anchors,
cur_gt[mask],
gt_classes=selected_classes,
matched_threshold=self.matched_thresholds[anchor_class_name],
unmatched_threshold=self.unmatched_thresholds[anchor_class_name]
)
target_list.append(single_target)
if self.use_multihead:
target_dict = {
'box_cls_labels': [t['box_cls_labels'].view(-1) for t in target_list],
'box_reg_targets': [t['box_reg_targets'].view(-1, self.box_coder.code_size) for t in target_list],
'reg_weights': [t['reg_weights'].view(-1) for t in target_list]
}
target_dict['box_reg_targets'] = torch.cat(target_dict['box_reg_targets'], dim=0)
target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=0).view(-1)
target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=0).view(-1)
else:
target_dict = {
'box_cls_labels': [t['box_cls_labels'].view(*feature_map_size, -1) for t in target_list],
'box_reg_targets': [t['box_reg_targets'].view(*feature_map_size, -1, self.box_coder.code_size)
for t in target_list],
'reg_weights': [t['reg_weights'].view(*feature_map_size, -1) for t in target_list]
}
target_dict['box_reg_targets'] = torch.cat(
target_dict['box_reg_targets'], dim=-2
).view(-1, self.box_coder.code_size)
target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=-1).view(-1)
target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=-1).view(-1)
bbox_targets.append(target_dict['box_reg_targets'])
cls_labels.append(target_dict['box_cls_labels'])
reg_weights.append(target_dict['reg_weights'])
bbox_targets = torch.stack(bbox_targets, dim=0)
cls_labels = torch.stack(cls_labels, dim=0)
reg_weights = torch.stack(reg_weights, dim=0)
all_targets_dict = {
'box_cls_labels': cls_labels,
'box_reg_targets': bbox_targets,
'reg_weights': reg_weights
}
return all_targets_dict
def assign_targets_single(self, anchors, gt_boxes, gt_classes, matched_threshold=0.6, unmatched_threshold=0.45):
"""Assign target single."""
num_anchors = anchors.shape[0]
num_gt = gt_boxes.shape[0]
labels = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
gt_ids = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1
if len(gt_boxes) > 0 and anchors.shape[0] > 0:
anchor_by_gt_overlap = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7]) \
if self.match_height else box_utils.boxes3d_nearest_bev_iou(anchors[:, 0:7], gt_boxes[:, 0:7])
anchor_to_gt_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=1)).cuda()
anchor_to_gt_max = anchor_by_gt_overlap[
torch.arange(num_anchors, device=anchors.device), anchor_to_gt_argmax
]
gt_to_anchor_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=0)).cuda()
gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax, torch.arange(num_gt, device=anchors.device)]
empty_gt_mask = gt_to_anchor_max == 0
gt_to_anchor_max[empty_gt_mask] = -1
anchors_with_max_overlap = (anchor_by_gt_overlap == gt_to_anchor_max).nonzero()[:, 0]
gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap]
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
gt_ids[anchors_with_max_overlap] = gt_inds_force.int()
pos_inds = anchor_to_gt_max >= matched_threshold
gt_inds_over_thresh = anchor_to_gt_argmax[pos_inds]
labels[pos_inds] = gt_classes[gt_inds_over_thresh]
gt_ids[pos_inds] = gt_inds_over_thresh.int()
bg_inds = (anchor_to_gt_max < unmatched_threshold).nonzero()[:, 0]
else:
bg_inds = torch.arange(num_anchors, device=anchors.device)
fg_inds = (labels > 0).nonzero()[:, 0]
if self.pos_fraction is not None:
num_fg = int(self.pos_fraction * self.sample_size)
if len(fg_inds) > num_fg:
num_disabled = len(fg_inds) - num_fg
disable_inds = torch.randperm(len(fg_inds))[:num_disabled]
labels[disable_inds] = -1
fg_inds = (labels > 0).nonzero()[:, 0]
num_bg = self.sample_size - (labels > 0).sum()
if len(bg_inds) > num_bg:
enable_inds = bg_inds[torch.randint(0, len(bg_inds), size=(num_bg,))]
labels[enable_inds] = 0
# bg_inds = torch.nonzero(labels == 0)[:, 0]
else:
if len(gt_boxes) == 0 or anchors.shape[0] == 0:
labels[:] = 0
else:
labels[bg_inds] = 0
labels[anchors_with_max_overlap] = gt_classes[gt_inds_force]
bbox_targets = anchors.new_zeros((num_anchors, self.box_coder.code_size))
if len(gt_boxes) > 0 and anchors.shape[0] > 0:
fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :]
fg_anchors = anchors[fg_inds, :]
bbox_targets[fg_inds, :] = self.box_coder.encode_torch(fg_gt_boxes, fg_anchors)
reg_weights = anchors.new_zeros((num_anchors,))
if self.norm_by_num_examples:
num_examples = (labels >= 0).sum()
num_examples = num_examples if num_examples > 1.0 else 1.0
reg_weights[labels > 0] = 1.0 / num_examples
else:
reg_weights[labels > 0] = 1.0
ret_dict = {
'box_cls_labels': labels,
'box_reg_targets': bbox_targets,
'reg_weights': reg_weights,
}
return ret_dict
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Anchor generator."""
import torch
class AnchorGenerator(object):
"""AnchorGenerator class."""
def __init__(self, anchor_range, anchor_generator_config):
"""Initialize."""
super().__init__()
self.anchor_generator_cfg = anchor_generator_config
self.anchor_range = anchor_range
self.anchor_sizes = [config['anchor_sizes'] for config in anchor_generator_config]
self.anchor_rotations = [config['anchor_rotations'] for config in anchor_generator_config]
self.anchor_heights = [config['anchor_bottom_heights'] for config in anchor_generator_config]
self.align_center = [config.get('align_center', False) for config in anchor_generator_config]
assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(self.anchor_heights)
self.num_of_anchor_sets = len(self.anchor_sizes)
def generate_anchors(self, grid_sizes):
"""Generate anchors."""
assert len(grid_sizes) == self.num_of_anchor_sets
all_anchors = []
num_anchors_per_location = []
for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip(
grid_sizes, self.anchor_sizes, self.anchor_rotations, self.anchor_heights, self.align_center):
num_anchors_per_location.append(len(anchor_rotation) * len(anchor_size) * len(anchor_height))
if align_center:
x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0]
y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1]
x_offset, y_offset = x_stride / 2, y_stride / 2
else:
x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (grid_size[0] - 1)
y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (grid_size[1] - 1)
x_offset, y_offset = 0, 0
x_shifts = torch.arange(
self.anchor_range[0] + x_offset, self.anchor_range[3] + 1e-5, step=x_stride, dtype=torch.float32,
).cuda()
y_shifts = torch.arange(
self.anchor_range[1] + y_offset, self.anchor_range[4] + 1e-5, step=y_stride, dtype=torch.float32,
).cuda()
z_shifts = x_shifts.new_tensor(anchor_height)
num_anchor_size, num_anchor_rotation = anchor_size.__len__(), anchor_rotation.__len__()
anchor_rotation = x_shifts.new_tensor(anchor_rotation)
anchor_size = x_shifts.new_tensor(anchor_size)
x_shifts, y_shifts, z_shifts = torch.meshgrid([
x_shifts, y_shifts, z_shifts
]) # [x_grid, y_grid, z_grid]
anchors = torch.stack((x_shifts, y_shifts, z_shifts), dim=-1) # [x, y, z, 3]
anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1)
anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat([*anchors.shape[0:3], 1, 1])
anchors = torch.cat((anchors, anchor_size), dim=-1)
anchors = anchors[:, :, :, :, None, :].repeat(1, 1, 1, 1, num_anchor_rotation, 1)
anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat([*anchors.shape[0:3], num_anchor_size, 1, 1])
anchors = torch.cat((anchors, anchor_rotation), dim=-1) # [x, y, z, num_size, num_rot, 7]
anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous()
# anchors = anchors.view(-1, anchors.shape[-1])
anchors[..., 2] += anchors[..., 5] / 2 # shift to box centers
all_anchors.append(anchors)
return all_anchors, num_anchors_per_location
if __name__ == '__main__':
from easydict import EasyDict
config = [
EasyDict({
'anchor_sizes': [[2.1, 4.7, 1.7], [0.86, 0.91, 1.73], [0.84, 1.78, 1.78]],
'anchor_rotations': [0, 1.57],
            'anchor_bottom_heights': [0, 0.5]
})
]
A = AnchorGenerator(
anchor_range=[-75.2, -75.2, -2, 75.2, 75.2, 4],
anchor_generator_config=config
)
A.generate_anchors([[188, 188]])
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/dense_heads/target_assigner/anchor_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""2D backbone module."""
from .base_bev_backbone import BaseBEVBackbone
__all__ = {
'BaseBEVBackbone': BaseBEVBackbone
}
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_2d/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base BEV backbone."""
import numpy as np
import torch
import torch.nn as nn
class BaseBEVBackbone(nn.Module):
"""Base BEV backbone."""
def __init__(self, model_cfg, input_channels):
"""Initialize."""
super().__init__()
self.model_cfg = model_cfg
if self.model_cfg.get('layer_nums', None) is not None:
assert len(self.model_cfg.layer_nums) == len(self.model_cfg.layer_strides) == len(self.model_cfg.num_filters)
layer_nums = self.model_cfg.layer_nums
layer_strides = self.model_cfg.layer_strides
num_filters = self.model_cfg.num_filters
else:
layer_nums = layer_strides = num_filters = []
if self.model_cfg.get('upsample_strides', None) is not None:
assert len(self.model_cfg.upsample_strides) == len(self.model_cfg.num_upsample_filters)
num_upsample_filters = self.model_cfg.num_upsample_filters
upsample_strides = self.model_cfg.upsample_strides
else:
upsample_strides = num_upsample_filters = []
num_levels = len(layer_nums)
c_in_list = [input_channels, *num_filters[:-1]]
self.blocks = nn.ModuleList()
self.deblocks = nn.ModuleList()
for idx in range(num_levels):
cur_layers = [
nn.ZeroPad2d(1),
nn.Conv2d(
c_in_list[idx], num_filters[idx], kernel_size=3,
stride=layer_strides[idx], padding=0, bias=False
),
nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
]
for _ in range(layer_nums[idx]):
cur_layers.extend([
nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
])
self.blocks.append(nn.Sequential(*cur_layers))
if len(upsample_strides) > 0:
stride = upsample_strides[idx]
if stride >= 1:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(
num_filters[idx], num_upsample_filters[idx],
upsample_strides[idx],
stride=upsample_strides[idx], bias=False
),
nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
))
else:
                    stride = int(np.round(1 / stride))
self.deblocks.append(nn.Sequential(
nn.Conv2d(
num_filters[idx], num_upsample_filters[idx],
stride,
stride=stride, bias=False
),
nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01),
nn.ReLU()
))
c_in = sum(num_upsample_filters)
if len(upsample_strides) > num_levels:
self.deblocks.append(nn.Sequential(
nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False),
nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01),
nn.ReLU(),
))
self.num_bev_features = c_in
def forward(self, data_dict):
"""
Args:
data_dict:
spatial_features
Returns:
"""
spatial_features = data_dict['spatial_features']
ups = []
ret_dict = {}
x = spatial_features
for i in range(len(self.blocks)):
x = self.blocks[i](x)
stride = int(spatial_features.shape[2] / x.shape[2])
ret_dict['spatial_features_%dx' % stride] = x
if len(self.deblocks) > 0:
ups.append(self.deblocks[i](x))
else:
ups.append(x)
if len(ups) > 1:
x = torch.cat(ups, dim=1)
elif len(ups) == 1:
x = ups[0]
if len(self.deblocks) > len(self.blocks):
x = self.deblocks[-1](x)
data_dict['spatial_features_2d'] = x
return data_dict
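# Shape sketch (illustrative; the settings below are typical PointPillars values assumed
# only for this example, not values read from this repo's configs):
#
#   layer_nums=[3, 5, 5], layer_strides=[2, 2, 2], num_filters=[64, 128, 256],
#   upsample_strides=[1, 2, 4], num_upsample_filters=[128, 128, 128]
#   spatial_features: (N, 64, H, W) -> blocks produce maps at H/2, H/4 and H/8,
#   each deblock upsamples its block output back to H/2, and the concatenation gives
#   spatial_features_2d: (N, 384, H/2, W/2)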
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_2d/base_bev_backbone.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Map Voxels to BEV(2D feature map)."""
from .pointpillar_scatter import PointPillarScatter
__all__ = {
'PointPillarScatter': PointPillarScatter
}
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_2d/map_to_bev/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scatter module for PointPillars."""
import torch
import torch.nn as nn
class PointPillarScatter(nn.Module):
"""PointPillarScatter class."""
def __init__(self, model_cfg, grid_size, **kwargs):
"""Initialize."""
super().__init__()
self.model_cfg = model_cfg
self.num_bev_features = self.model_cfg.num_bev_features
self.nx, self.ny, self.nz = grid_size
assert self.nz == 1
def forward(self, batch_dict, **kwargs):
"""Forward."""
pillar_features, coords = batch_dict['pillar_features'], batch_dict['voxel_coords']
batch_spatial_features = []
batch_size = coords[:, 0].max().int().item() + 1
for batch_idx in range(batch_size):
spatial_feature = torch.zeros(
self.num_bev_features,
self.nz * self.nx * self.ny,
dtype=pillar_features.dtype,
device=pillar_features.device)
batch_mask = coords[:, 0] == batch_idx
this_coords = coords[batch_mask, :]
indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3]
indices = indices.type(torch.long)
pillars = pillar_features[batch_mask, :]
pillars = pillars.t()
spatial_feature[:, indices] = pillars
batch_spatial_features.append(spatial_feature)
batch_spatial_features = torch.stack(batch_spatial_features, 0)
batch_spatial_features = batch_spatial_features.view(batch_size, self.num_bev_features * self.nz, self.ny, self.nx)
batch_dict['spatial_features'] = batch_spatial_features
return batch_dict
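# Index sketch (illustrative): coords rows are (batch_idx, z, y, x) with z == 0, so a
# pillar at grid cell (x, y) is written to flat index y * nx + x of the (C, nz*ny*nx)
# canvas, which is then reshaped into the dense BEV map (batch, C, ny, nx).
#
#   e.g. with nx=432, ny=496 (assumed KITTI-style grid), a pillar with coords
#   (0, 0, 10, 20) lands at flat index 10 * 432 + 20 = 4340.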
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NMS utils."""
import torch
from ...ops.iou3d_nms import iou3d_nms_utils
def class_agnostic_nms(box_scores, box_preds, nms_config, score_thresh=None):
"""Class-agnostic NMS."""
src_box_scores = box_scores
if score_thresh is not None:
scores_mask = (box_scores >= score_thresh)
box_scores = box_scores[scores_mask]
box_preds = box_preds[scores_mask]
selected = []
if box_scores.shape[0] > 0:
box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.nms_pre_max_size, box_scores.shape[0]))
boxes_for_nms = box_preds[indices]
keep_idx = getattr(iou3d_nms_utils, nms_config.nms_type)(boxes_for_nms[:, 0:7], box_scores_nms, nms_config.nms_thresh, **nms_config)[0]
selected = indices[keep_idx[:nms_config.nms_post_max_size]]
if score_thresh is not None:
original_idxs = scores_mask.nonzero().view(-1)
selected = original_idxs[selected]
return selected, src_box_scores[selected]
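# Example usage (a sketch; the threshold values are illustrative and nms_config is
# assumed to be an EasyDict-like object exposing the fields read by this function):
#
#   nms_config = EasyDict({
#       'nms_type': 'nms_gpu', 'nms_thresh': 0.01,
#       'nms_pre_max_size': 4096, 'nms_post_max_size': 500
#   })
#   selected, selected_scores = class_agnostic_nms(
#       box_scores, box_preds, nms_config, score_thresh=0.1
#   )   # indices into box_preds and the corresponding scores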
def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None):
"""
Args:
cls_scores: (N, num_class)
box_preds: (N, 7 + C)
nms_config:
score_thresh:
Returns:
"""
pred_scores, pred_labels, pred_boxes = [], [], []
for k in range(cls_scores.shape[1]):
if score_thresh is not None:
scores_mask = (cls_scores[:, k] >= score_thresh)
box_scores = cls_scores[scores_mask, k]
cur_box_preds = box_preds[scores_mask]
        else:
            box_scores = cls_scores[:, k]
            cur_box_preds = box_preds
selected = []
if box_scores.shape[0] > 0:
box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.nms_pre_max_size, box_scores.shape[0]))
boxes_for_nms = cur_box_preds[indices]
keep_idx = getattr(iou3d_nms_utils, nms_config.nms_type)(boxes_for_nms[:, 0:7], box_scores_nms, nms_config.nms_thresh, **nms_config)[0]
selected = indices[keep_idx[:nms_config.nms_post_max_size]]
pred_scores.append(box_scores[selected])
pred_labels.append(box_scores.new_ones(len(selected)).long() * k)
pred_boxes.append(cur_box_preds[selected])
pred_scores = torch.cat(pred_scores, dim=0)
pred_labels = torch.cat(pred_labels, dim=0)
pred_boxes = torch.cat(pred_boxes, dim=0)
return pred_scores, pred_labels, pred_boxes
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/model_utils/model_nms_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model utils."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/model_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D backbones."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_3d/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VFE Template module."""
import torch.nn as nn
class VFETemplate(nn.Module):
"""VFETemplate class."""
def __init__(self, model_cfg, **kwargs):
"""Initialize."""
super().__init__()
self.model_cfg = model_cfg
def get_output_feature_dim(self):
"""Get output feature dimension."""
raise NotImplementedError
def forward(self, **kwargs):
"""
Args:
**kwargs:
Returns:
batch_dict:
...
vfe_features: (num_voxels, C)
"""
raise NotImplementedError
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_3d/vfe/vfe_template.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pillar VFE module."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .vfe_template import VFETemplate
class PFNLayer(nn.Module):
"""PFN layer."""
def __init__(self,
in_channels,
out_channels,
use_norm=True,
last_layer=False):
"""Initialize."""
super().__init__()
self.last_vfe = last_layer
self.use_norm = use_norm
if not self.last_vfe:
out_channels = out_channels // 2
if self.use_norm:
self.linear = nn.Linear(in_channels, out_channels, bias=False)
self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)
else:
self.linear = nn.Linear(in_channels, out_channels, bias=True)
self.part = 50000
def forward(self, inputs):
"""Forward."""
x = self.linear(inputs)
        voxel_num_points = x.shape[1]
if self.use_norm:
x = self.norm(x.permute(0, 2, 1))
x = F.relu(x)
x = F.max_pool1d(x, voxel_num_points, stride=1)
x_max = x.permute(0, 2, 1)
else:
x = F.relu(x)
x = x.permute(0, 2, 1)
x = F.max_pool1d(x, voxel_num_points, stride=1)
x_max = x.permute(0, 2, 1)
if self.last_vfe:
return x_max
x_repeat = x_max.repeat(1, inputs.shape[1], 1)
x_concatenated = torch.cat([x, x_repeat], dim=2)
return x_concatenated
class PillarVFE(VFETemplate):
"""Pillar VFE module."""
def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range):
"""Initialize."""
super().__init__(model_cfg=model_cfg)
self.use_norm = self.model_cfg.use_norm
self.with_distance = self.model_cfg.with_distance
self.use_absolute_xyz = self.model_cfg.use_absolue_xyz
num_point_features += 6 if self.use_absolute_xyz else 3
if self.with_distance:
num_point_features += 1
self.num_filters = self.model_cfg.num_filters
assert len(self.num_filters) > 0
num_filters = [num_point_features] + list(self.num_filters)
pfn_layers = []
for i in range(len(num_filters) - 1):
in_filters = num_filters[i]
out_filters = num_filters[i + 1]
pfn_layers.append(
PFNLayer(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
)
self.pfn_layers = nn.ModuleList(pfn_layers)
self.voxel_x = voxel_size[0]
self.voxel_y = voxel_size[1]
self.voxel_z = voxel_size[2]
self.x_offset = self.voxel_x / 2 + point_cloud_range[0]
self.y_offset = self.voxel_y / 2 + point_cloud_range[1]
self.z_offset = self.voxel_z / 2 + point_cloud_range[2]
def get_output_feature_dim(self):
"""Get output feature dimension."""
return self.num_filters[-1]
def get_paddings_indicator(self, actual_num, max_num, axis=0):
"""Get paddings indicator."""
actual_num = torch.unsqueeze(actual_num, axis + 1)
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis + 1] = -1
max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
paddings_indicator = actual_num.int() > max_num
return paddings_indicator
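    # Example (illustrative): with actual_num = tensor([2, 4]) and max_num = 4 this
    # returns [[True, True, False, False], [True, True, True, True]], i.e. True for
    # real points and False for zero-padded slots inside each pillar.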
def forward(self, batch_dict, **kwargs):
"""Forward method."""
voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords']
points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
f_cluster = voxel_features[:, :, :3] - points_mean
f_center = torch.zeros_like(voxel_features[:, :, :3])
f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset)
f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset)
f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset)
if self.use_absolute_xyz:
features = [voxel_features, f_cluster, f_center]
else:
features = [voxel_features[..., 3:], f_cluster, f_center]
if self.with_distance:
points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True)
features.append(points_dist)
features = torch.cat(features, dim=-1)
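        # Zero out the zero-padded point slots so they do not contribute to the
        # max-pooled pillar feature.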
voxel_count = features.shape[1]
mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
mask = torch.unsqueeze(mask, -1).type_as(voxel_features)
features *= mask
for pfn in self.pfn_layers:
features = pfn(features)
features = features.squeeze()
batch_dict['pillar_features'] = features
return batch_dict
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_3d/vfe/pillar_vfe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VFE modules."""
from .pillar_vfe import PillarVFE
from .vfe_template import VFETemplate
__all__ = {
'VFETemplate': VFETemplate,
'PillarVFE': PillarVFE
}
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/backbones_3d/vfe/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detector module Template."""
from .detector3d_template import Detector3DTemplate
from .pointpillar import PointPillar
__all__ = {
'Detector3DTemplate': Detector3DTemplate,
'PointPillar': PointPillar,
}
def build_detector(model_cfg, num_class, dataset):
"""Build the detector."""
model = __all__[model_cfg.name](
model_cfg=model_cfg, num_class=num_class, dataset=dataset
)
return model
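# Illustrative usage (sketch; cfg and train_set are placeholders for the experiment
# config and the training dataset built elsewhere in the pipeline):
#   model = build_detector(model_cfg=cfg.model, num_class=len(cfg.class_names), dataset=train_set)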
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/detectors/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars model definition."""
from .detector3d_template import Detector3DTemplate
class PointPillar(Detector3DTemplate):
"""PointPillars model class."""
def __init__(self, model_cfg, num_class, dataset):
"""Initialize."""
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
self.module_list = self.build_networks()
def forward(self, batch_dict):
"""Forward."""
for cur_module in self.module_list:
batch_dict = cur_module(batch_dict)
if self.training:
loss, tb_dict, disp_dict = self.get_training_loss()
ret_dict = {
'loss': loss
}
return ret_dict, tb_dict, disp_dict
pred_dicts, recall_dicts = self.post_processing(batch_dict)
return pred_dicts, recall_dicts
def get_training_loss(self):
"""Get training loss."""
disp_dict = {}
loss_rpn, tb_dict = self.dense_head.get_loss()
tb_dict = {
'loss_rpn': loss_rpn.item(),
**tb_dict
}
loss = loss_rpn
return loss, tb_dict, disp_dict
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/detectors/pointpillar.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Template detector class of PointPillars."""
import os
import torch
import torch.nn as nn
from ...ops.iou3d_nms import iou3d_nms_utils
from .. import backbones_2d, backbones_3d, dense_heads
from ..backbones_2d import map_to_bev
from ..backbones_3d import vfe
from ..model_utils import model_nms_utils
class Detector3DTemplate(nn.Module):
"""Template 3D Detector."""
def __init__(self, model_cfg, num_class, dataset):
"""Initialize."""
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.class_names = dataset.class_names
self.num_point_features = dataset.point_feature_encoder.num_point_features
self.grid_size = dataset.grid_size
self.point_cloud_range = dataset.point_cloud_range
self.voxel_size = dataset.voxel_size
self.register_buffer('global_step', torch.LongTensor(1).zero_())
self.module_topology = [
'vfe', 'backbone_3d', 'map_to_bev_module',
'backbone_2d', 'dense_head', 'point_head',
]
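        # Modules are built in this order; builders whose section is absent from the
        # model config (typically 'backbone_3d' and 'point_head' for PointPillars)
        # return None and are not added to module_list.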
@property
def mode(self):
"""mode of training."""
return 'train' if self.training else 'test'
def update_global_step(self):
"""Update global step."""
self.global_step += 1
def build_networks(self):
"""Build the network."""
model_info_dict = {
'module_list': [],
'num_rawpoint_features': self.num_point_features,
'num_point_features': self.num_point_features,
'grid_size': self.grid_size,
'point_cloud_range': self.point_cloud_range,
'voxel_size': self.voxel_size
}
for module_name in self.module_topology:
module, model_info_dict = getattr(self, 'build_%s' % module_name)(
model_info_dict=model_info_dict
)
self.add_module(module_name, module)
return model_info_dict['module_list']
def build_vfe(self, model_info_dict):
"""Build VFE module."""
if self.model_cfg.get('vfe', None) is None:
return None, model_info_dict
vfe_module = vfe.__all__[self.model_cfg.vfe.name](
model_cfg=self.model_cfg.vfe,
num_point_features=model_info_dict['num_rawpoint_features'],
point_cloud_range=model_info_dict['point_cloud_range'],
voxel_size=model_info_dict['voxel_size']
)
model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim()
model_info_dict['module_list'].append(vfe_module)
return vfe_module, model_info_dict
def build_backbone_3d(self, model_info_dict):
"""Build 3D backbone module."""
if self.model_cfg.get('backbone_3d', None) is None:
return None, model_info_dict
backbone_3d_module = backbones_3d.__all__[self.model_cfg.backbone_3d.name](
model_cfg=self.model_cfg.backbone_3d,
input_channels=model_info_dict['num_point_features'],
grid_size=model_info_dict['grid_size'],
voxel_size=model_info_dict['voxel_size'],
point_cloud_range=model_info_dict['point_cloud_range']
)
model_info_dict['module_list'].append(backbone_3d_module)
model_info_dict['num_point_features'] = backbone_3d_module.num_point_features
return backbone_3d_module, model_info_dict
def build_map_to_bev_module(self, model_info_dict):
"""Build scatter module."""
if self.model_cfg.get('map_to_bev', None) is None:
return None, model_info_dict
map_to_bev_module = map_to_bev.__all__[self.model_cfg.map_to_bev.name](
model_cfg=self.model_cfg.map_to_bev,
grid_size=model_info_dict['grid_size']
)
model_info_dict['module_list'].append(map_to_bev_module)
model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features
return map_to_bev_module, model_info_dict
def build_backbone_2d(self, model_info_dict):
"""Build 2D backbone module."""
if self.model_cfg.get('backbone_2d', None) is None:
return None, model_info_dict
backbone_2d_module = backbones_2d.__all__[self.model_cfg.backbone_2d.name](
model_cfg=self.model_cfg.backbone_2d,
input_channels=model_info_dict['num_bev_features']
)
model_info_dict['module_list'].append(backbone_2d_module)
model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features
return backbone_2d_module, model_info_dict
def build_dense_head(self, model_info_dict):
"""Build dense head module."""
if self.model_cfg.get('dense_head', None) is None:
return None, model_info_dict
dense_head_module = dense_heads.__all__[self.model_cfg.dense_head.name](
model_cfg=self.model_cfg.dense_head,
input_channels=model_info_dict['num_bev_features'],
num_class=self.num_class if not self.model_cfg.dense_head.class_agnostic else 1,
class_names=self.class_names,
grid_size=model_info_dict['grid_size'],
point_cloud_range=model_info_dict['point_cloud_range'],
predict_boxes_when_training=self.model_cfg.get('roi_head', False)
)
model_info_dict['module_list'].append(dense_head_module)
return dense_head_module, model_info_dict
def build_point_head(self, model_info_dict):
"""Build point head module."""
if self.model_cfg.get('point_head', None) is None:
return None, model_info_dict
if self.model_cfg.point_head.get('use_point_features_before_fusion', False):
num_point_features = model_info_dict['num_point_features_before_fusion']
else:
num_point_features = model_info_dict['num_point_features']
point_head_module = dense_heads.__all__[self.model_cfg.point_head.name](
model_cfg=self.model_cfg.point_head,
input_channels=num_point_features,
num_class=self.num_class if not self.model_cfg.point_head.class_agnostic else 1,
predict_boxes_when_training=self.model_cfg.get('roi_head', False)
)
model_info_dict['module_list'].append(point_head_module)
return point_head_module, model_info_dict
def forward(self, **kwargs):
"""Forward method."""
raise NotImplementedError
def post_processing(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...]
multihead_label_mapping: [(num_class1), (num_class2), ...]
batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
cls_preds_normalized: indicate whether batch_cls_preds is normalized
batch_index: optional (N1+N2+...)
has_class_labels: True/False
roi_labels: (B, num_rois) 1 .. num_classes
batch_pred_labels: (B, num_boxes, 1)
        Returns:
            pred_dicts: list with one dict per sample, each containing
                'pred_boxes' (M, 7+C), 'pred_scores' (M) and 'pred_labels' (M)
            recall_dict: accumulated recall statistics for the batch
"""
post_process_cfg = self.model_cfg.post_processing
batch_size = batch_dict['batch_size']
recall_dict = {}
pred_dicts = []
for index in range(batch_size):
if batch_dict.get('batch_index', None) is not None:
assert batch_dict['batch_box_preds'].shape.__len__() == 2
batch_mask = (batch_dict['batch_index'] == index)
else:
assert batch_dict['batch_box_preds'].shape.__len__() == 3
batch_mask = index
box_preds = batch_dict['batch_box_preds'][batch_mask]
src_box_preds = box_preds
if not isinstance(batch_dict['batch_cls_preds'], list):
cls_preds = batch_dict['batch_cls_preds'][batch_mask]
src_cls_preds = cls_preds
assert cls_preds.shape[1] in [1, self.num_class]
if not batch_dict['cls_preds_normalized']:
cls_preds = torch.sigmoid(cls_preds)
else:
cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']]
src_cls_preds = cls_preds
if not batch_dict['cls_preds_normalized']:
cls_preds = [torch.sigmoid(x) for x in cls_preds]
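            # Two NMS paths: multi-class NMS over per-class predictions, or
            # class-agnostic NMS on the maximum class score (the else branch below).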
if post_process_cfg.nms_config.multi_classes_nms:
if not isinstance(cls_preds, list):
cls_preds = [cls_preds]
multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)]
else:
multihead_label_mapping = batch_dict['multihead_label_mapping']
cur_start_idx = 0
pred_scores, pred_labels, pred_boxes = [], [], []
for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping):
assert cur_cls_preds.shape[1] == len(cur_label_mapping)
cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]]
cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms(
cls_scores=cur_cls_preds, box_preds=cur_box_preds,
nms_config=post_process_cfg.nms_config,
score_thresh=post_process_cfg.score_thresh
)
cur_pred_labels = cur_label_mapping[cur_pred_labels]
pred_scores.append(cur_pred_scores)
pred_labels.append(cur_pred_labels)
pred_boxes.append(cur_pred_boxes)
cur_start_idx += cur_cls_preds.shape[0]
final_scores = torch.cat(pred_scores, dim=0)
final_labels = torch.cat(pred_labels, dim=0)
final_boxes = torch.cat(pred_boxes, dim=0)
else:
cls_preds, label_preds = torch.max(cls_preds, dim=-1)
if batch_dict.get('has_class_labels', False):
label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels'
label_preds = batch_dict[label_key][index]
else:
label_preds = label_preds + 1
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=post_process_cfg.nms_config,
score_thresh=post_process_cfg.score_thresh
)
if post_process_cfg.output_raw_score:
max_cls_preds, _ = torch.max(src_cls_preds, dim=-1)
selected_scores = max_cls_preds[selected]
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
recall_dict = self.generate_recall_record(
box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds,
recall_dict=recall_dict, batch_index=index, data_dict=batch_dict,
thresh_list=post_process_cfg.recall_thresh_list
)
record_dict = {
'pred_boxes': final_boxes,
'pred_scores': final_scores,
'pred_labels': final_labels
}
pred_dicts.append(record_dict)
return pred_dicts, recall_dict
@staticmethod
def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None):
"""Generate recall record."""
if 'gt_boxes' not in data_dict:
return recall_dict
rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None
gt_boxes = data_dict['gt_boxes'][batch_index]
if recall_dict.__len__() == 0:
recall_dict = {'gt': 0}
for cur_thresh in thresh_list:
recall_dict['roi_%s' % (str(cur_thresh))] = 0
recall_dict['rcnn_%s' % (str(cur_thresh))] = 0
cur_gt = gt_boxes
k = cur_gt.__len__() - 1
while k > 0 and cur_gt[k].sum() == 0:
k -= 1
cur_gt = cur_gt[:k + 1]
if cur_gt.shape[0] > 0:
if box_preds.shape[0] > 0:
iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7])
else:
iou3d_rcnn = torch.zeros((0, cur_gt.shape[0]))
if rois is not None:
iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7])
for cur_thresh in thresh_list:
if iou3d_rcnn.shape[0] == 0:
recall_dict['rcnn_%s' % str(cur_thresh)] += 0
else:
rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled
if rois is not None:
roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item()
recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled
recall_dict['gt'] += cur_gt.shape[0]
return recall_dict
def load_params_from_file(self, filename, logger, to_cpu=False):
"""Load model parameters from file."""
if not os.path.isfile(filename):
            raise FileNotFoundError(filename)
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
model_state_disk = checkpoint['model_state']
if 'version' in checkpoint:
logger.info('==> Checkpoint trained from version: %s' % checkpoint['version'])
update_model_state = {}
state_dict = self.state_dict() # noqa # pylint: disable=missing-kwoa
for key, val in model_state_disk.items():
if key in state_dict and state_dict[key].shape == model_state_disk[key].shape:
update_model_state[key] = val
# logger.info('Update weight %s: %s' % (key, str(val.shape)))
state_dict.update(update_model_state)
self.load_state_dict(state_dict)
for key in state_dict:
if key not in update_model_state:
logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape)))
logger.info(
'==> Done (loaded %d/%d)' % (
len(update_model_state),
len(self.state_dict()) # noqa # pylint: disable=missing-kwoa
)
)
def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None):
"""Load model parameters with optimizer."""
if not os.path.isfile(filename):
            raise FileNotFoundError(filename)
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
checkpoint = torch.load(filename, map_location=loc_type)
epoch = checkpoint.get('epoch', -1)
it = checkpoint.get('it', 0.0)
self.load_state_dict(checkpoint['model_state'])
if optimizer is not None:
if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None:
logger.info('==> Loading optimizer parameters from checkpoint %s to %s'
% (filename, 'CPU' if to_cpu else 'GPU'))
optimizer.load_state_dict(checkpoint['optimizer_state'])
else:
assert filename[-4] == '.', filename
src_file, ext = filename[:-4], filename[-3:]
optimizer_filename = '%s_optim.%s' % (src_file, ext)
if os.path.exists(optimizer_filename):
optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type)
optimizer.load_state_dict(optimizer_ckpt['optimizer_state'])
if 'version' in checkpoint:
print('==> Checkpoint trained from version: %s' % checkpoint['version'])
logger.info('==> Done')
return it, epoch
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/models/detectors/detector3d_template.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom ops for PointPillars."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/ops/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""3D NMS."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/ops/iou3d_nms/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""
3D IoU Calculation and Rotated NMS
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
"""
import torch
from ...utils import common_utils
from . import iou3d_nms_cuda
def boxes_bev_iou_cpu(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
    Returns:
        ans_iou: (N, M)
"""
boxes_a, is_numpy = common_utils.check_numpy_to_torch(boxes_a)
boxes_b, is_numpy = common_utils.check_numpy_to_torch(boxes_b)
assert not (boxes_a.is_cuda or boxes_b.is_cuda), 'Only support CPU tensors'
assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7
ans_iou = boxes_a.new_zeros(torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
iou3d_nms_cuda.boxes_iou_bev_cpu( # noqa pylint: disable=I1101
boxes_a.contiguous(), boxes_b.contiguous(), ans_iou
)
return ans_iou.numpy() if is_numpy else ans_iou
def boxes_iou_bev(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
Returns:
ans_iou: (N, M)
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 7
ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_()
iou3d_nms_cuda.boxes_iou_bev_gpu( # noqa pylint: disable=I1101
boxes_a.contiguous(), boxes_b.contiguous(), ans_iou
)
return ans_iou
def boxes_iou3d_gpu(boxes_a, boxes_b):
"""
Args:
boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
        boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
Returns:
ans_iou: (N, M)
"""
assert boxes_a.shape[1] == boxes_b.shape[1] == 7
# height overlap
boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1)
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1)
boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1)
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1)
# bev overlap
overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M)
iou3d_nms_cuda.boxes_overlap_bev_gpu( # noqa pylint: disable=I1101
boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev
)
max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min)
min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max)
overlaps_h = torch.clamp(min_of_max - max_of_min, min=0)
# 3d iou
overlaps_3d = overlaps_bev * overlaps_h
vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1)
vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1)
iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6)
return iou3d
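# Illustrative usage (sketch; both arguments must be CUDA tensors):
#   iou = boxes_iou3d_gpu(pred_boxes[:, 0:7], gt_boxes[:, 0:7])  # (N, M) pairwise 3D IoU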
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs):
"""
:param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
:param scores: (N)
:param thresh:
:return:
"""
assert boxes.shape[1] == 7
order = scores.sort(0, descending=True)[1]
if pre_maxsize is not None:
order = order[:pre_maxsize]
boxes = boxes[order].contiguous()
keep = torch.LongTensor(boxes.size(0))
num_out = iou3d_nms_cuda.nms_gpu( # noqa pylint: disable=I1101
boxes, keep, thresh
)
return order[keep[:num_out].cuda()].contiguous(), None
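# Illustrative usage (sketch; the threshold and pre_maxsize values are examples only):
#   keep_idx, _ = nms_gpu(boxes, scores, thresh=0.01, pre_maxsize=4096)
# boxes (N, 7) and scores (N,) are CUDA tensors; keep_idx indexes the retained boxes.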
def nms_normal_gpu(boxes, scores, thresh, **kwargs):
"""
:param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
:param scores: (N)
:param thresh:
:return:
"""
assert boxes.shape[1] == 7
order = scores.sort(0, descending=True)[1]
boxes = boxes[order].contiguous()
keep = torch.LongTensor(boxes.size(0))
num_out = iou3d_nms_cuda.nms_normal_gpu( # noqa pylint: disable=I1101
boxes, keep, thresh
)
return order[keep[:num_out].cuda()].contiguous(), None
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/ops/iou3d_nms/iou3d_nms_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RoI-aware 3D Pooling."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/ops/roiaware_pool3d/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for RoI-aware 3D Pooling."""
import torch
import torch.nn as nn
from torch.autograd import Function
from ...utils import common_utils
from . import roiaware_pool3d_cuda
def points_in_boxes_cpu(points, boxes):
"""
Args:
points: (num_points, 3)
        boxes: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center; boxes must not overlap with each other
Returns:
point_indices: (N, num_points)
"""
assert boxes.shape[1] == 7
assert points.shape[1] == 3
points, is_numpy = common_utils.check_numpy_to_torch(points)
boxes, is_numpy = common_utils.check_numpy_to_torch(boxes)
point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
roiaware_pool3d_cuda.points_in_boxes_cpu( # noqa pylint: disable=I1101
boxes.float().contiguous(),
points.float().contiguous(),
point_indices
)
return point_indices.numpy() if is_numpy else point_indices
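# Illustrative usage (sketch; points and boxes may be numpy arrays or CPU tensors):
#   indicator = points_in_boxes_cpu(points[:, 0:3], gt_boxes[:, 0:7])
#   indicator has shape (num_boxes, num_points) and is nonzero where a point lies inside a box.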
def points_in_boxes_gpu(points, boxes):
"""
:param points: (B, M, 3)
:param boxes: (B, T, 7), num_valid_boxes <= T
:return box_idxs_of_pts: (B, M), default background = -1
"""
assert boxes.shape[0] == points.shape[0]
assert boxes.shape[2] == 7 and points.shape[2] == 3
batch_size, num_points, _ = points.shape
box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1)
roiaware_pool3d_cuda.points_in_boxes_gpu( # noqa pylint: disable=I1101
boxes.contiguous(), points.contiguous(), box_idxs_of_pts
)
return box_idxs_of_pts
class RoIAwarePool3d(nn.Module):
"""RoI aware 3D Pooling."""
def __init__(self, out_size, max_pts_each_voxel=128):
"""Initialize."""
super().__init__()
self.out_size = out_size
self.max_pts_each_voxel = max_pts_each_voxel
def forward(self, rois, pts, pts_feature, pool_method='max'):
"""Forward."""
assert pool_method in ['max', 'avg']
return RoIAwarePool3dFunction.apply(rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel, pool_method)
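# Illustrative usage (sketch; out_size=6 is an example value):
#   pool = RoIAwarePool3d(out_size=6, max_pts_each_voxel=128)
#   pooled = pool(rois, pts, pts_feature, pool_method='max')  # (num_rois, 6, 6, 6, C)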
class RoIAwarePool3dFunction(Function):
"""RoIAware 3D Pooling Function."""
@staticmethod
def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
"""
Args:
ctx:
rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
pts: (npoints, 3)
pts_feature: (npoints, C)
out_size: int or tuple, like 7 or (7, 7, 7)
max_pts_each_voxel:
pool_method: 'max' or 'avg'
Returns:
pooled_features: (N, out_x, out_y, out_z, C)
"""
assert rois.shape[1] == 7 and pts.shape[1] == 3
if isinstance(out_size, int):
out_x = out_y = out_z = out_size
else:
assert len(out_size) == 3
for k in range(3):
assert isinstance(out_size[k], int)
out_x, out_y, out_z = out_size
num_rois = rois.shape[0]
num_channels = pts_feature.shape[-1]
num_pts = pts.shape[0]
pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)
pool_method_map = {'max': 0, 'avg': 1}
pool_method = pool_method_map[pool_method]
roiaware_pool3d_cuda.forward( # noqa pylint: disable=I1101
rois, pts, pts_feature, argmax,
pts_idx_of_voxels, pooled_features,
pool_method
)
ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)
return pooled_features
@staticmethod
def backward(ctx, grad_out):
"""
:param grad_out: (N, out_x, out_y, out_z, C)
:return:
grad_in: (npoints, C)
"""
pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward
grad_in = grad_out.new_zeros((num_pts, num_channels))
roiaware_pool3d_cuda.backward( # noqa pylint: disable=I1101
pts_idx_of_voxels, argmax,
grad_out.contiguous(), grad_in,
pool_method
)
return None, None, grad_in, None, None, None
if __name__ == '__main__':
pass
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars script module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export script for PointPillars."""
import argparse
import torch
from torch import nn
from torch.nn import functional as F
import onnx
from onnxsim import simplify
import os
try:
import tensorrt as trt # pylint: disable=unused-import # noqa: F401
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.export.tensorrt import (
Calibrator,
ONNXEngineBuilder
)
trt_available = True
except: # noqa: E722
import logging
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, exporting TLT to a TensorRT engine "
"will not be available."
)
trt_available = False
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.export.simplifier_onnx import (
simplify_onnx
)
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.config import cfg, cfg_from_yaml_file
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models import load_checkpoint
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.train_utils.train_utils import (
encrypt_onnx
)
class ExportablePFNLayer(nn.Module):
"""PFN layer replacement that can be exported to ONNX."""
def __init__(self, model):
"""Initialize."""
super().__init__()
self.model = model
def forward(self, inputs):
"""Forward method."""
inputs_shape = inputs.cpu().detach().numpy().shape
if len(inputs_shape) == 4:
inputs = inputs.view((-1, inputs_shape[2], inputs_shape[3]))
x = self.model.linear(inputs)
voxel_num_points = inputs_shape[-2]
if self.model.use_norm:
x = self.model.norm(x.permute(0, 2, 1))
x = F.relu(x)
x = F.max_pool1d(x, voxel_num_points, stride=1)
x_max = x.permute(0, 2, 1)
else:
x = F.relu(x)
x = x.permute(0, 2, 1)
x = F.max_pool1d(x, voxel_num_points, stride=1)
x_max = x.permute(0, 2, 1)
if len(inputs_shape) == 4:
x_max_shape = x_max.cpu().detach().numpy().shape
x_max = x_max.view((-1, inputs_shape[1], x_max_shape[2]))
else:
x_max = x_max.squeeze(1)
if self.model.last_vfe:
return x_max
x_repeat = x_max.repeat(1, inputs.shape[1], 1)
x_concatenated = torch.cat([x, x_repeat], dim=2)
return x_concatenated
class ExportablePillarVFE(nn.Module):
"""PillarVFE module replacement to it can be exported."""
def __init__(self, model):
"""Initialize."""
super().__init__()
self.model = model
def forward(self, voxel_features, voxel_num_points, coords):
"""Forward method."""
points_mean = voxel_features[..., :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1)
f_cluster = voxel_features[..., :3] - points_mean
f_center = torch.zeros_like(voxel_features[..., :3])
f_center[..., 0] = voxel_features[..., 0] - (coords[..., 3].to(voxel_features.dtype).unsqueeze(2) * self.model.voxel_x + self.model.x_offset)
f_center[..., 1] = voxel_features[..., 1] - (coords[..., 2].to(voxel_features.dtype).unsqueeze(2) * self.model.voxel_y + self.model.y_offset)
f_center[..., 2] = voxel_features[..., 2] - (coords[..., 1].to(voxel_features.dtype).unsqueeze(2) * self.model.voxel_z + self.model.z_offset)
if self.model.use_absolute_xyz:
features = [voxel_features, f_cluster, f_center]
else:
features = [voxel_features[..., 3:], f_cluster, f_center]
if self.model.with_distance:
points_dist = torch.norm(voxel_features[..., :3], 2, 2, keepdim=True)
features.append(points_dist)
features = torch.cat(features, dim=-1)
for pfn in self.model.pfn_layers:
exportable_pfn = ExportablePFNLayer(pfn)
features = exportable_pfn(features)
return features
class ExportableScatter(nn.Module):
"""Scatter module replacement that can be exported."""
def __init__(self, model):
"""Initialize."""
super().__init__()
self.model = model
def forward(self, pillar_features, coords):
"""Forward method."""
batch_spatial_features = []
batch_size = coords[..., 0].max().int().item() + 1
for batch_idx in range(batch_size):
spatial_feature = torch.zeros(
self.model.num_bev_features,
self.model.nz * self.model.nx * self.model.ny,
dtype=pillar_features.dtype,
device=pillar_features.device
)
batch_mask = coords[batch_idx, :, 0] == batch_idx
this_coords = coords[batch_idx, batch_mask, :]
indices = this_coords[:, 1] + this_coords[:, 2] * self.model.nx + this_coords[:, 3]
indices = indices.type(torch.long)
pillars = pillar_features[batch_idx, batch_mask, :]
pillars = pillars.t()
spatial_feature[:, indices] = pillars
batch_spatial_features.append(spatial_feature)
batch_spatial_features = torch.stack(batch_spatial_features, 0)
batch_spatial_features = batch_spatial_features.view(
-1, self.model.num_bev_features * self.model.nz,
self.model.ny, self.model.nx
)
return batch_spatial_features
class ExportableBEVBackbone(nn.Module):
"""Exportable BEV backbone."""
def __init__(self, model):
"""Initialize."""
super().__init__()
self.model = model
def forward(self, spatial_features):
"""Forward method."""
ups = []
x = spatial_features
for i in range(len(self.model.blocks)):
x = self.model.blocks[i](x)
if len(self.model.deblocks) > 0:
ups.append(self.model.deblocks[i](x))
else:
ups.append(x)
if len(ups) > 1:
x = torch.cat(ups, dim=1)
elif len(ups) == 1:
x = ups[0]
if len(self.model.deblocks) > len(self.model.blocks):
x = self.model.deblocks[-1](x)
return x
class ExportableAnchorHead(nn.Module):
"""Exportable Anchor Head."""
def __init__(self, model):
"""Initialize."""
super().__init__()
self.model = model
def forward(self, spatial_features_2d, batch_size):
"""Forward method."""
cls_preds = self.model.conv_cls(spatial_features_2d)
box_preds = self.model.conv_box(spatial_features_2d)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
if self.model.conv_dir_cls is not None:
dir_cls_preds = self.model.conv_dir_cls(spatial_features_2d)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
else:
dir_cls_preds = None
return cls_preds, box_preds, dir_cls_preds
class ExportablePointPillar(nn.Module):
"""Exportable PointPillar model."""
def __init__(self, model):
"""Initialize."""
super().__init__()
self.module_list = model.module_list
self.exportable_vfe = ExportablePillarVFE(self.module_list[0])
self.exportable_scatter = ExportableScatter(self.module_list[1])
self.exportable_bev_backbone = ExportableBEVBackbone(self.module_list[2])
self.exportable_anchor_head = ExportableAnchorHead(self.module_list[3])
def forward(self, voxel_features, voxel_num_points, coords):
"""Forward method."""
self.batch_size = 1
pillar_features = self.exportable_vfe(voxel_features, voxel_num_points, coords) # "PillarVFE"
spatial_features = self.exportable_scatter(pillar_features, coords) # "PointPillarScatter"
spatial_features_2d = self.exportable_bev_backbone(spatial_features) # "BaseBEVBackbone"
cls_preds, box_preds, dir_cls_preds = self.exportable_anchor_head(spatial_features_2d, self.batch_size) # "AnchorHeadSingle"
return cls_preds, box_preds, dir_cls_preds
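# Note: the three tensors returned above become the ONNX graph outputs named
# 'cls_preds', 'box_preds' and 'dir_cls_preds' in the torch.onnx.export call below.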
def parse_config():
"""Argument Parser."""
parser = argparse.ArgumentParser(description='Argument Parser')
parser.add_argument(
'--cfg_file', '-c', type=str, required=True,
help='PointPillars training config file'
)
parser.add_argument(
'--cal_data_path', '-d', type=str, required=False, default=None,
help='Path to the point cloud data directory'
)
parser.add_argument(
"--cal_cache_file",
type=str,
required=False,
default="./cal.bin",
help="Path to save the calibration file in INT8 mode"
)
parser.add_argument(
"--data_type", "-t",
required=False, default="fp32",
help="The data type for export(useful for TensorRT INT8 calibration)"
)
parser.add_argument(
"--save_engine",
"-e",
type=str,
required=False,
default=None,
help="Path to save the TensorRT engine."
)
parser.add_argument(
"--batch_size",
"-b",
type=int,
default=1,
required=False,
help="Batch size of the TensorRT engine to be generated(if --save_engine is provided)."
)
parser.add_argument(
"--cal_num_batches",
type=int,
default=8,
required=False,
help="Number of data batches for INT8 calibration"
)
parser.add_argument(
"--workspace_size",
"-w",
type=int,
default=1024,
required=False,
help="Workspace size in MB for TensorRT, default is 1024MB(1GB)."
)
parser.add_argument("--key", "-k", type=str, required=True, help="Encryption key")
args = parser.parse_args()
cfg_from_yaml_file(expand_path(args.cfg_file), cfg)
return args, cfg
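# Typical direct invocation (sketch; paths and the key are placeholders):
#   python export.py -c /path/to/pointpillars.yaml -k $KEY --data_type fp16 --save_engine /path/to/model.engine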
def main():
"""Main function."""
if not trt_available:
raise ValueError("Failed to import tensorrt library, exporting to a Tensorrt engine not possible")
args, cfg = parse_config()
# INT8 is not yet fully supported, raise error if one tries to use it
if args.data_type.lower() == "int8":
raise ValueError("INT8 is not supported for PointPillars, please use FP32/FP16")
logger = common_utils.create_logger()
logger.info('Exporting the model...')
gpu_id = cfg.export.gpu_id or 0
torch.cuda.set_device(gpu_id)
if cfg.export.checkpoint is None:
raise OSError("Please provide export.checkpoint in config file")
if not os.path.isfile(cfg.export.checkpoint):
raise FileNotFoundError(f"Input model {cfg.export.checkpoint} does not exist")
if cfg.export.onnx_file is None:
split_name = os.path.splitext(cfg.export.checkpoint)[0]
output_file = "{}.onnx".format(split_name)
else:
output_file = cfg.export.onnx_file
    # Fail early if the output file already exists so it is not overwritten.
assert not os.path.exists(output_file), "Default onnx file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
if output_file.endswith('.etlt'):
tmp_onnx_file = output_file.replace('.etlt', '.onnx')
else:
tmp_onnx_file = output_file
# Set up status logging
result_dir = os.path.dirname(output_file)
status_file = os.path.join(result_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting PointPillars export")
# Load model
loaded_model = load_checkpoint(cfg.export.checkpoint, args.key)[0]
model = ExportablePointPillar(loaded_model)
model.cuda()
model.eval()
with torch.no_grad():
MAX_VOXELS = cfg.dataset.data_processor[2].max_number_of_voxels["test"]
MAX_POINTS = cfg.dataset.data_processor[2].max_points_per_voxel
NUM_POINT_FEATS = cfg.dataset.data_augmentor.aug_config_list[0].num_point_features
dummy_voxel_features = torch.zeros(
(1, MAX_VOXELS, MAX_POINTS, NUM_POINT_FEATS),
dtype=torch.float32,
device='cuda:0'
)
dummy_voxel_num_points = torch.zeros(
(1, MAX_VOXELS,),
dtype=torch.int32,
device='cuda:0'
)
dummy_coords = torch.zeros(
# 4: (batch_idx, x, y, z)
(1, MAX_VOXELS, 4),
dtype=torch.int32,
device='cuda:0'
)
torch.onnx.export(
model,
(dummy_voxel_features, dummy_voxel_num_points, dummy_coords),
tmp_onnx_file,
export_params=True,
opset_version=11,
do_constant_folding=True,
keep_initializers_as_inputs=True,
input_names=['input', 'voxel_num_points', 'coords'],
output_names=['cls_preds', 'box_preds', 'dir_cls_preds'],
dynamic_axes={
"input": {0: "batch"},
"voxel_num_points": {0: "batch"},
"coords": {0: "batch"}
}
)
onnx_model = onnx.load(tmp_onnx_file)
model_simp, check = simplify(
onnx_model,
overwrite_input_shapes={
"input": (1, MAX_VOXELS, MAX_POINTS, NUM_POINT_FEATS),
'voxel_num_points': (1, MAX_VOXELS),
'coords': (1, MAX_VOXELS, 4)
}
)
assert check, "Failed on simplifying the ONNX model"
model_simp = simplify_onnx(model_simp, cfg)
onnx.save(model_simp, tmp_onnx_file)
if output_file.endswith('.etlt') and args.key:
# encrypt the onnx if and only if key is provided and output file name ends with .etlt
encrypt_onnx(tmp_file_name=tmp_onnx_file,
output_file_name=output_file,
key=args.key)
logger.info(f'Model exported to {output_file}')
status_logging.get_status_logger().write(
status_level=status_logging.Status.RUNNING,
message=f'Model exported to {output_file}'
)
# Save TRT engine
if args.save_engine is not None:
if args.data_type.lower() == "int8":
if args.cal_data_path is not None:
calibrator = Calibrator(
args.cal_data_path,
args.cal_cache_file,
args.cal_num_batches,
args.batch_size,
cfg.inference.max_points_num
)
else:
raise ValueError("Cannot find caliration data path")
else:
calibrator = None
builder = ONNXEngineBuilder(
tmp_onnx_file,
max_batch_size=args.batch_size,
min_batch_size=args.batch_size,
opt_batch_size=args.batch_size,
dtype=args.data_type,
max_workspace_size=args.workspace_size * 1024 * 1024,
dynamic_batch=True,
calibrator=calibrator
)
engine = builder.get_engine()
with open(expand_path(args.save_engine), "wb") as outf:
outf.write(engine.serialize())
logger.info(f'TensorRT engine saved to {args.save_engine}')
status_logging.get_status_logger().write(
status_level=status_logging.Status.RUNNING,
message=f'TensorRT engine saved to {args.save_engine}'
)
if output_file.endswith('.etlt') and args.key:
os.remove(tmp_onnx_file)
if __name__ == '__main__':
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset convert script for PointPillars."""
import os
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.datasets.general.pc_dataset import create_pc_infos
import argparse
from easydict import EasyDict
from pathlib import Path
import yaml
def parse_args(args=None):
"""Argument Parser."""
parser = argparse.ArgumentParser(description="General point cloud dataset converter.")
parser.add_argument("--cfg_file", "-c", type=str, help="Config file.")
return parser.parse_known_args(args)[0]
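# Typical direct invocation (sketch; the config path is a placeholder):
#   python dataset_convert.py --cfg_file /path/to/pointpillars.yaml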
if __name__ == "__main__":
args = parse_args()
cfg_file = expand_path(args.cfg_file)
with open(cfg_file) as f:
cfg = EasyDict(yaml.load(f, Loader=yaml.FullLoader)) # nosec
names = cfg.dataset.class_names
data_path = cfg.dataset.data_path
results_dir = cfg.results_dir
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting PointPillars dataset convert")
try:
create_pc_infos(
dataset_cfg=cfg.dataset,
class_names=names,
data_path=Path(data_path),
save_path=Path(data_path),
status_logging=status_logging
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script for PointPillars."""
import argparse
import datetime
import os
from pathlib import Path
import shutil
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.config import (
cfg,
cfg_from_yaml_file,
)
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.datasets import build_dataloader
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models import (
build_model_and_optimizer,
model_fn_decorator
)
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.train_utils.optimization import build_optimizer, build_scheduler
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.train_utils.train_utils import train_model
def parse_config():
"""Argument Parser."""
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
parser.add_argument("--output_dir", type=str, required=False, default=None, help="output checkpoint directory.")
parser.add_argument("--gpus", "-g", type=int, default=1, help="Number of GPUs to run the training")
parser.add_argument("--key", "-k", type=str, required=True, help="Encryption key")
args = parser.parse_args()
cfg_from_yaml_file(expand_path(args.cfg_file), cfg)
return args, cfg
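# Typical direct invocation (sketch; paths and the key are placeholders):
#   python train.py --cfg_file /path/to/pointpillars.yaml --key $KEY --gpus 1 --output_dir /path/to/results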
def main():
"""Main function."""
args, cfg = parse_config()
args.workers = cfg.dataset.num_workers
args.sync_bn = cfg.model.sync_bn
args.batch_size = cfg.train.batch_size
args.epochs = cfg.train.num_epochs
args.ckpt = cfg.train.resume_training_checkpoint_path
args.pretrained_model = cfg.model.pretrained_model_path
args.pruned_model = cfg.train.pruned_model_path
args.tcp_port = cfg.train.tcp_port
args.fix_random_seed = cfg.train.random_seed
args.ckpt_save_interval = cfg.train.checkpoint_interval
args.max_ckpt_save_num = cfg.train.max_checkpoint_save_num
args.merge_all_iters_to_one_epoch = cfg.train.merge_all_iters_to_one_epoch
if args.gpus == 1:
dist_train = False
total_gpus = 1
elif args.gpus > 1:
total_gpus, cfg.LOCAL_RANK = common_utils.init_dist_pytorch(
args.local_rank, backend='nccl'
)
dist_train = True
else:
raise ValueError(f"Number of GPUs should be >=1, got: {args.gpus}")
if args.fix_random_seed is not None:
common_utils.set_random_seed(args.fix_random_seed)
if args.output_dir is None:
if cfg.results_dir is None:
raise OSError("Either provide results_dir in config file or provide output_dir as a CLI argument")
else:
args.output_dir = cfg.results_dir
output_dir = Path(expand_path(args.output_dir))
output_dir.mkdir(parents=True, exist_ok=True)
log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
# Set up status logger
status_file = os.path.join(str(output_dir), "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting PointPillars training"
)
# log to file
logger.info('**********************Start logging**********************')
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
if dist_train:
logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
if cfg.LOCAL_RANK == 0:
# os.system('cp %s %s' % (args.cfg_file, output_dir))
os.makedirs(expand_path(output_dir), exist_ok=True)
shutil.copyfile(args.cfg_file, os.path.join(output_dir, os.path.basename(args.cfg_file)))
tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None
# -----------------------create dataloader & network & optimizer---------------------------
train_set, train_loader, train_sampler = build_dataloader(
dataset_cfg=cfg.dataset,
class_names=cfg.class_names,
batch_size=args.batch_size,
dist=dist_train,
workers=args.workers,
logger=logger,
training=True,
merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
total_epochs=args.epochs
)
model, opt_state, start_epoch, it = build_model_and_optimizer(
cfg.model,
len(cfg.class_names),
train_set,
args.pruned_model,
args.ckpt,
args.pretrained_model,
dist_train,
logger,
args.key
)
optimizer = build_optimizer(model, cfg.train)
if opt_state is not None:
optimizer.load_state_dict(opt_state)
if args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda()
# Reload optimizer states after model moved to cuda() so the optimizer
# states are also moved to the same device
optimizer.load_state_dict(optimizer.state_dict())
model.train() # before wrap to DistributedDataParallel to support fixed some parameters
if dist_train:
model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()])
# Build LR scheduler
last_epoch = -1
if start_epoch > 0:
last_epoch = start_epoch + 1
lr_scheduler, lr_warmup_scheduler = build_scheduler(
optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs,
last_epoch=last_epoch, optim_cfg=cfg.train
)
logger.info('**********************Start training**********************')
train_model(
model,
optimizer,
train_loader,
model_func=model_fn_decorator(),
lr_scheduler=lr_scheduler,
optim_cfg=cfg.train,
start_epoch=start_epoch,
total_epochs=args.epochs,
start_iter=it,
rank=cfg.LOCAL_RANK,
tb_log=tb_log,
status_logging=status_logging,
ckpt_save_dir=output_dir,
key=args.key,
train_sampler=train_sampler,
lr_warmup_scheduler=lr_warmup_scheduler,
ckpt_save_interval=args.ckpt_save_interval,
max_ckpt_save_num=args.max_ckpt_save_num,
merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
)
logger.info('**********************End training**********************')
if __name__ == '__main__':
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pruning script for PointPillars."""
import argparse
import datetime
import os
from pathlib import Path
import tempfile
import torch
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.path_utils import expand_path
import nvidia_tao_pytorch.pruning.torch_pruning as tp
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.config import (
cfg, cfg_from_yaml_file
)
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.datasets import build_dataloader
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models import (
load_checkpoint,
load_data_to_gpu
)
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.train_utils.train_utils import (
encrypt_pytorch
)
def parse_args(args=None):
"""Argument Parser."""
parser = argparse.ArgumentParser(description="model pruning")
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--output_dir', type=str, default=None, help='output directory.')
parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
parser.add_argument('--pruning_thresh', "-pth", type=float, default=0.1, help='Pruning threshold')
parser.add_argument("--key", "-k", type=str, required=True, help="Encryption key")
args = parser.parse_args()
cfg_from_yaml_file(expand_path(args.cfg_file), cfg)
return args, cfg
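# Typical direct invocation (sketch; paths and the key are placeholders):
#   python prune.py --cfg_file /path/to/pointpillars.yaml --key $KEY --pruning_thresh 0.1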
def prune_model():
"""Prune the PointPillars model."""
args, cfg = parse_args()
dist_train = False
args.batch_size = 1
args.epochs = cfg.train.num_epochs
threshold = args.pruning_thresh
if args.output_dir is None:
if cfg.results_dir is None:
raise OSError("Either provide results_dir in config file or provide output_dir as a CLI argument")
else:
args.output_dir = cfg.results_dir
args.output_dir = expand_path(args.output_dir)
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
# Set status logging
status_file = os.path.join(str(output_dir), "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting PointPillars Pruning")
# -----------------------create dataloader & network & optimizer---------------------------
train_loader = build_dataloader(
dataset_cfg=cfg.dataset,
class_names=cfg.class_names,
batch_size=args.batch_size,
dist=dist_train, workers=args.workers,
logger=logger,
training=True,
merge_all_iters_to_one_epoch=False,
total_epochs=args.epochs
)[1]
input_dict = next(iter(train_loader))
load_data_to_gpu(input_dict)
if cfg.prune.model is None:
raise OSError("Please provide prune.model in config file")
if not os.path.exists(expand_path(cfg.prune.model)):
raise OSError(f"Model not found: {cfg.prune.model}")
model = load_checkpoint(cfg.prune.model, args.key)[0]
model = model.cuda()
model = model.eval()
unpruned_total_params = sum(p.numel() for p in model.parameters())
strategy = tp.strategy.L1Strategy() # or tp.strategy.RandomStrategy()
DG = tp.DependencyGraph()
DG.build_dependency(model, example_inputs=input_dict)
# conv layers
layers = [module for module in model.modules() if isinstance(module, torch.nn.Conv2d)]
# Exclude heads
black_list = layers[-3:]
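    # The last three Conv2d layers are assumed to be the detection-head outputs
    # (class, box and direction); leaving them unpruned keeps the output shapes valid.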
count = 0
for layer in layers:
if layer in black_list:
continue
# can run some algo here to generate threshold for every node
threshold_run = threshold
pruning_idxs = strategy(layer.weight, amount=threshold_run)
pruning_plan = DG.get_pruning_plan(layer, tp.prune_conv, idxs=pruning_idxs)
if pruning_plan is not None:
pruning_plan.exec()
else:
continue
count += 1
pruned_total_params = sum(p.numel() for p in model.parameters())
print("Pruning ratio: {}".format(
pruned_total_params / unpruned_total_params)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.RUNNING,
message="Pruning ratio: {}".format(pruned_total_params / unpruned_total_params)
)
save_path = expand_path(f"{args.output_dir}/pruned_{threshold}.tlt")
handle, temp_file = tempfile.mkstemp()
os.close(handle)
torch.save(model, temp_file)
encrypt_pytorch(temp_file, save_path, args.key)
print(f"Pruned model saved to {save_path}")
status_logging.get_status_logger().write(
status_level=status_logging.Status.RUNNING,
message=f"Pruned model saved to {save_path}"
)
return model
if __name__ == "__main__":
try:
prune_model()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference script for PointPillars."""
import argparse
import datetime
import os
from pathlib import Path
import numpy as np
import torch
from torch import nn
try:
import tensorrt as trt # pylint: disable=unused-import # noqa: F401
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.export.tensorrt_model import TrtModel
except: # noqa: E722
import logging
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, "
"inference with TensorRT engine will not be available."
)
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.eval_utils import eval_utils
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.config import cfg, cfg_from_yaml_file, log_config_to_file
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.datasets import build_dataloader
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models import load_checkpoint
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models.model_utils import model_nms_utils
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
def parse_config():
"""Argument Parser."""
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--save_to_file', action='store_true', default=False, help='')
parser.add_argument("--output_dir", type=str, required=False, default=None, help="output checkpoint directory.")
parser.add_argument(
"--trt_engine",
type=str,
required=False,
default=None,
help="Path to the TensorRT engine to be used for inference"
)
parser.add_argument("--key", "-k", type=str, required=True, help="Encryption key")
args = parser.parse_args()
cfg_from_yaml_file(expand_path(args.cfg_file), cfg)
np.random.seed(1024)
return args, cfg
def parse_epoch_num(model_file):
"""Parse epoch number from model file."""
model_base = os.path.basename(model_file)
epoch_string = model_base[:-4].split("_")[-1]
return int(epoch_string)
def infer_single_ckpt(
model, test_loader, args,
infer_output_dir, logger,
cfg
):
"""Do inference with PyTorch model."""
model.cuda()
eval_utils.infer_one_epoch(
cfg, model, test_loader, logger,
result_dir=infer_output_dir, save_to_file=args.save_to_file
)
def infer_single_ckpt_trt(
model, test_loader, args,
infer_output_dir, logger,
cfg
):
"""Do inference with TensorRT engine."""
eval_utils.infer_one_epoch_trt(
cfg, model, test_loader, logger,
result_dir=infer_output_dir, save_to_file=args.save_to_file
)
class CustomNMS(nn.Module):
"""Customized NMS module."""
def __init__(self, post_process_cfg):
"""Initialize."""
super().__init__()
self.post_process_cfg = post_process_cfg
def forward(self, output_boxes, num_boxes):
"""Forward method."""
batch_output = []
for idx, box_per_frame in enumerate(output_boxes):
num_box_per_frame = num_boxes[idx]
box_per_frame = torch.from_numpy(box_per_frame).cuda()
box_per_frame = box_per_frame[:num_box_per_frame, ...]
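            # Each row holds the 7 box parameters followed by the class label and the score;
            # the +1 shifts the engine's 0-based class indices to the 1-based labels used downstream.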
box_preds = box_per_frame[:, 0:7]
label_preds = box_per_frame[:, 7] + 1
cls_preds = box_per_frame[:, 8]
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=self.post_process_cfg.nms_config,
score_thresh=self.post_process_cfg.score_thresh
)
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
final_output = torch.cat(
[
final_boxes,
final_scores.view((-1, 1)),
final_labels.view((-1, 1))
],
axis=-1
)
batch_output.append(final_output.cpu().numpy())
return batch_output
class CustomPostProcessing(nn.Module):
"""Customized PostProcessing module."""
def __init__(self, model, cfg):
"""Initialize."""
super().__init__()
self.model = model
self.custom_nms = CustomNMS(cfg)
def forward(self, output_boxes, num_boxes):
"""Forward method."""
return self.custom_nms(
output_boxes,
num_boxes
)
class TrtModelWrapper():
"""TensorRT engine wrapper."""
def __init__(self, model, cfg, trt_model):
"""Initialize."""
self.model = model
self.cfg = cfg
self.trt_model = trt_model
self.post_processor = CustomPostProcessing(
self.model,
self.cfg.model.post_processing
)
def __call__(self, input_dict):
"""Call method."""
trt_output = self.trt_model.predict(input_dict)
return self.post_processor(
trt_output["output_boxes"],
trt_output["num_boxes"],
)
def main():
"""Main function."""
args, cfg = parse_config()
args.batch_size = cfg.inference.batch_size
args.workers = cfg.dataset.num_workers
args.ckpt = cfg.inference.checkpoint
if args.output_dir is None:
if cfg.results_dir is None:
raise OSError("Either provide results_dir in config file or provide output_dir as a CLI argument")
else:
args.output_dir = cfg.results_dir
output_dir = Path(expand_path(args.output_dir))
output_dir.mkdir(parents=True, exist_ok=True)
infer_output_dir = output_dir / 'infer'
infer_output_dir.mkdir(parents=True, exist_ok=True)
log_file = infer_output_dir / ('log_infer_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=0)
# log to file
logger.info('**********************Start logging**********************')
# Set status logging
status_file = os.path.join(str(infer_output_dir), "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting PointPillars inference")
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
for key, val in vars(args).items():
logger.info('{:16} {}'.format(key, val))
log_config_to_file(cfg, logger=logger)
test_loader = build_dataloader(
dataset_cfg=cfg.dataset,
class_names=cfg.class_names,
batch_size=args.batch_size,
dist=False, workers=args.workers, logger=logger, training=False
)[1]
model = load_checkpoint(
args.ckpt,
args.key
)[0]
# Try to load TRT engine if there is any
if args.trt_engine is not None:
trt_model = TrtModel(
args.trt_engine,
args.batch_size,
)
trt_model.build_or_load_trt_engine()
# Check the batch size
engine_batch_size = trt_model.engine._engine.get_binding_shape(0)[0]
if engine_batch_size != args.batch_size:
raise ValueError(f"TensorRT engine batch size: {engine_batch_size}, mismatch with "
f"batch size for evaluation: {args.batch_size}. "
"Please make sure they are the same by generating a new engine or "
f"modifying the evaluation batch size in spec file to {engine_batch_size}.")
model_wrapper = TrtModelWrapper(
model,
cfg,
trt_model
)
with torch.no_grad():
infer_single_ckpt_trt(
model_wrapper, test_loader, args,
infer_output_dir, logger, cfg
)
else:
# Load model from checkpoint
with torch.no_grad():
infer_single_ckpt(
model, test_loader, args, infer_output_dir,
logger, cfg
)
if __name__ == '__main__':
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation script for PointPillars."""
import argparse
import datetime
import os
from pathlib import Path
import numpy as np
import torch
from torch import nn
# force pycuda on primary context before using TensorRT
import pycuda
import pycuda.autoinit
try:
import tensorrt as trt # pylint: disable=unused-import # noqa: F401
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.export.tensorrt_model import TrtModel
except: # noqa: E722
import logging
logger = logging.getLogger(__name__)
logger.warning(
"Failed to import TensorRT package, "
"evaluation with TensorRT engine will not be available."
)
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.tools.eval_utils import eval_utils
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.config import cfg, cfg_from_yaml_file
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.datasets import build_dataloader
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models import load_checkpoint
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models.model_utils import model_nms_utils
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
pyc_dev = pycuda.autoinit.device
pyc_ctx = pyc_dev.retain_primary_context()
def parse_config():
"""Argument Parser."""
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--save_to_file', action='store_true', default=False, help='')
parser.add_argument("--output_dir", type=str, required=False, default=None, help="output checkpoint directory.")
parser.add_argument(
"--trt_engine",
type=str,
required=False,
default=None,
help="Path to the TensorRT engine to be used for evaluation"
)
parser.add_argument("--key", "-k", type=str, required=True, help="Encryption key")
args = parser.parse_args()
cfg_from_yaml_file(expand_path(args.cfg_file), cfg)
np.random.seed(1024)
return args, cfg
def parse_epoch_num(model_file):
"""Parse epoch number from model file."""
model_base = os.path.basename(model_file)
epoch_string = model_base[:-4].split("_")[-1]
return int(epoch_string)
def eval_single_ckpt(
model, test_loader, args,
eval_output_dir, logger, epoch_id,
cfg, dist_test=False
):
"""Evaluation with PyTorch model."""
model.cuda()
# start evaluation
return eval_utils.eval_one_epoch(
cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
result_dir=eval_output_dir, save_to_file=args.save_to_file
)
def eval_single_ckpt_trt(
model, test_loader, args,
eval_output_dir, logger,
cfg, dist_test=False
):
"""Evaluation with TensorRT engine."""
return eval_utils.eval_one_epoch_trt(
cfg, model, test_loader, logger, dist_test=dist_test,
result_dir=eval_output_dir, save_to_file=args.save_to_file
)
class CustomNMS(nn.Module):
"""NMS module."""
def __init__(self, post_process_cfg):
"""Initialize."""
super().__init__()
self.post_process_cfg = post_process_cfg
def forward(self, output_boxes, num_boxes):
"""Forward method."""
batch_output = []
for idx, box_per_frame in enumerate(output_boxes):
num_box_per_frame = num_boxes[idx]
box_per_frame = torch.from_numpy(box_per_frame).cuda()
box_per_frame = box_per_frame[:num_box_per_frame, ...]
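            # Each row holds the 7 box parameters followed by the class label and the score;
            # the +1 shifts the engine's 0-based class indices to the 1-based labels used downstream.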
box_preds = box_per_frame[:, 0:7]
label_preds = box_per_frame[:, 7] + 1
cls_preds = box_per_frame[:, 8]
selected, selected_scores = model_nms_utils.class_agnostic_nms(
box_scores=cls_preds, box_preds=box_preds,
nms_config=self.post_process_cfg.nms_config,
score_thresh=self.post_process_cfg.score_thresh
)
final_scores = selected_scores
final_labels = label_preds[selected]
final_boxes = box_preds[selected]
final_output = torch.cat(
[
final_boxes,
final_scores.view((-1, 1)),
final_labels.view((-1, 1))
],
axis=-1
)
batch_output.append(final_output.cpu().numpy())
return batch_output
class CustomPostProcessing(nn.Module):
"""PostProcessing module."""
def __init__(self, model, cfg):
"""Initialize."""
super().__init__()
self.model = model
self.custom_nms = CustomNMS(cfg)
def forward(self, output_boxes, num_boxes):
"""Forward method."""
return self.custom_nms(
output_boxes,
num_boxes
)
class TrtModelWrapper():
"""TensorRT model wrapper."""
def __init__(self, model, cfg, trt_model):
"""Initialize."""
self.model = model
self.cfg = cfg
self.trt_model = trt_model
self.post_processor = CustomPostProcessing(
self.model,
self.cfg.model.post_processing
)
def __call__(self, input_dict):
"""call method."""
trt_output = self.trt_model.predict(input_dict)
return self.post_processor(
trt_output["output_boxes"],
trt_output["num_boxes"],
)
def main():
"""Main function."""
args, cfg = parse_config()
args.batch_size = cfg.evaluate.batch_size
args.workers = cfg.dataset.num_workers
args.ckpt = cfg.evaluate.checkpoint
if args.output_dir is None:
if cfg.results_dir is None:
raise OSError("Either provide results_dir in config file or provide output_dir as a CLI argument")
else:
args.output_dir = cfg.results_dir
output_dir = Path(expand_path(args.output_dir))
output_dir.mkdir(parents=True, exist_ok=True)
eval_output_dir = output_dir / 'eval'
eval_output_dir.mkdir(parents=True, exist_ok=True)
# Set status logging
status_file = os.path.join(str(eval_output_dir), "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting PointPillars evaluation"
)
log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=0)
# log to file
logger.info('**********************Start logging**********************')
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
test_loader = build_dataloader(
dataset_cfg=cfg.dataset,
class_names=cfg.class_names,
batch_size=args.batch_size,
dist=False, workers=args.workers, logger=logger, training=False
)[1]
checkpoint_loaded = load_checkpoint(
args.ckpt,
args.key
)
model = checkpoint_loaded[0]
epoch_num = checkpoint_loaded[2]
# Try to load TRT engine if there is any
if args.trt_engine is not None:
trt_model = TrtModel(
args.trt_engine,
args.batch_size,
)
trt_model.build_or_load_trt_engine()
# Check the batch size
engine_batch_size = trt_model.engine._engine.get_binding_shape(0)[0]
if engine_batch_size != args.batch_size:
raise ValueError(f"TensorRT engine batch size: {engine_batch_size}, mismatch with "
f"batch size for evaluation: {args.batch_size}. "
"Please make sure they are the same by generating a new engine or "
f"modifying the evaluation batch size in spec file to {engine_batch_size}.")
model_wrapper = TrtModelWrapper(
model,
cfg,
trt_model
)
with torch.no_grad():
ret_dict = eval_single_ckpt_trt(
model_wrapper, test_loader, args,
eval_output_dir, logger, cfg, dist_test=False
)
status_logging.get_status_logger().kpi = ret_dict
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
else:
# Load model from checkpoint
with torch.no_grad():
ret_dict = eval_single_ckpt(
model, test_loader, args, eval_output_dir,
logger, epoch_num, cfg, dist_test=False
)
status_logging.get_status_logger().kpi = ret_dict
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
if __name__ == '__main__':
pyc_ctx.push()
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
pyc_ctx.pop()
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the PointPillars task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to PointPillars.
"""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.config import (
cfg,
cfg_from_yaml_file,
)
import nvidia_tao_pytorch.pointcloud.pointpillars.scripts as scripts
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists out the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
def launch(parser, subtasks):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
parser.add_argument("--gpus", "-g", type=int, default=1, help="Number of GPUs")
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
script_args += " --cfg_file " + args.experiment_spec_file
# And add other params AFTERWARDS!
if args.subtask in ["train", "evaluate", "inference", "prune"]:
if args.results_dir:
script_args += " --output_dir " + args.results_dir
# Add encryption key.
if args.subtask in ["train", "evaluate", "inference", "export", "prune"]:
if args.key is not None:
script_args += " --key " + args.key
# Number of GPUs
if args.gpus > 1:
if args.subtask != "train":
            raise ValueError("Only the train task supports multi-GPU training")
else:
script_args += " --gpus " + str(args.gpus)
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
if args.gpus == 1:
call = "python " + script + script_args + " " + unknown_args_as_str
else:
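        # Multi-GPU training is launched through torch.distributed.launch with one process per
        # GPU, using train.tcp_port from the spec file as the rendezvous endpoint.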
cfg_from_yaml_file(expand_path(args.experiment_spec_file), cfg)
call = (f"python -m torch.distributed.launch --nproc_per_node={args.gpus} --rdzv_endpoint=localhost:{cfg.train.tcp_port} " + script + script_args + " " + unknown_args_as_str)
print(call)
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
exit(1)
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"pointpillars", add_help=True, description="TAO PointPillars"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks)
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/entrypoint/pointpillars.py |
#!/usr/bin/python
import os
import sys
import re
from submodules.rules import rules
from nvidia_tao_pytorch.core.path_utils import expand_path
def main():
file_path = expand_path(sys.argv[1])
with open(file_path, "r") as fp:
lines = fp.readlines()
for idx, line in enumerate(lines):
if line.strip() == "# ------------------------ >8 ------------------------":
break
if line[0] == "#":
continue
if not line_valid(idx, line):
print(f"line# {idx} failed")
show_rules()
sys.exit(1)
sys.exit(0)
def line_valid(idx, line):
if idx == 0:
#return re.match("^[A-Z].{,48}[0-9A-z \t]$", line)
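        # The first line must look like "[MODULE] Subject": module name up to 15 characters,
        # subject starting with a capital letter and kept within ~50 characters (see submodules/rules.py).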
return re.match("^\[((?!\s*$).{0,15})\][ \t].*?[A-Z].{0,48}[0-9A-z \t]$", line)
else:
return len(line.strip()) <= 72
def show_rules():
print(rules)
if __name__ == "__main__":
main() | tao_pytorch_backend-main | scripts/git-hooks/commit-msg.py |
 | tao_pytorch_backend-main | scripts/git-hooks/submodules/__init__.py
rules = """
# Failing to add message in the mentioned format will
# cause your local commit fail.
#
# Please follow these rules for commit messages:
# ==============================================
# 1. Commit message format - first line is mandatory
# [YOUR_MODULE_NAME] Subject line here not exceeding 50 characters
# * Optional line entry with detail not exceeding 72 characters
# * Optional line entry with detail not exceeding 72 characters
# * Optional line entry with detail not exceeding 72 characters
# 2. Limit the module name (YOUR_MODULE_NAME) to 15 characters length
# 3. Limit the subject(Text part after [YOUR_MODULE_NAME]) line to max
# 50 characters
# 4. Start subject (Text part after [YOUR_MODULE_NAME]) with a Capital
# letter and don't end with a period '.'
# 5. Wrap the body lines (if any) at 72 characters
""" | tao_pytorch_backend-main | scripts/git-hooks/submodules/rules.py |
#!/bin/env python3
from __future__ import print_function
from os import listdir
from os.path import isfile, join
import datetime
import os.path
import sys
import glob
import subprocess
import hashlib
# https://github.com/fedora-modularity/libmodulemd/blob/master/spec.v2.yaml
# We need a module.yaml file that we pass to modifyrepo so dnf knows
# how the streams are made up.
#
# Here we generate the module.yaml file.
KMOD_PKG_PREFIX = 'kmod-nvidia'
DESCRIPTION = [
'This package provides the most recent NVIDIA display driver which allows for',
'hardware accelerated rendering with recent NVIDIA chipsets.',
'',
'For the full product support list, please consult the release notes for',
'driver version {version}.',
]
# Unrelated to the version a branch is at, we always
# use the latest version of these rpms in every branch
LATEST_PKGS = [
'dnf-plugin-nvidia',
]
# Main package must be first!
BRANCH_PKGS = [
'nvidia-driver',
'nvidia-driver-libs',
'nvidia-driver-devel',
'nvidia-driver-NVML',
'nvidia-driver-NvFBCOpenGL',
'nvidia-driver-cuda',
'nvidia-driver-cuda-libs',
'nvidia-persistenced',
'nvidia-modprobe',
'nvidia-settings',
'nvidia-libXNVCtrl',
'nvidia-libXNVCtrl-devel',
'nvidia-xconfig',
'nvidia-kmod-common',
'cuda-drivers',
]
# Add-ons
OPTIONAL_PKGS = [
'nvidia-kmod-headers',
'nvidia-kmod-source',
'nvidia-fabric-manager',
]
class Writer:
output = ''
def line(self, str):
self.output += str + '\n'
def write(self, target):
if len(target) == 0:
print(self.output)
else:
with open(target, 'w') as text_file:
print(self.output, file=text_file)
def tab(self):
self.output += ' '
return self
def next(self):
self.output += '...\n---\n'
class Branch:
def __init__(self, name, major, minor, micro = None, arch = "noarch"):
self.name = name
self.major = major
self.minor = minor
self.micro = micro
self.arch = arch
def __repr__(self):
return 'Branch ({})'.format(self.version())
def __lt__(self, other):
if (self.major != other.major):
return other.major < self.major
if (self.minor != other.minor):
return other.minor < self.minor
if self.micro:
return other.micro < self.micro
return 0
def version(self):
return '{}.{}{}'.format(self.major, self.minor, '.' + str(self.micro) if self.micro else '')
def is_dkms(self):
return 'dkms' in self.name
def is_open(self):
return 'open' in self.name
def get_stream_hash(name, stream, version, distro):
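    # The modulemd "context" only needs to be a short, stable identifier for a stream build,
    # so a truncated md5 over name, stream, version and distro is sufficient here.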
uniq_str = name + stream + version + distro
hash_str = hashlib.md5(uniq_str.encode('utf-8')).hexdigest()[:10]
print('context: ' + hash_str + ' = ', name, stream, version, distro)
return hash_str
def version_from_rpm_filename(filename):
# name - version - release.dist.arch.rpm
hyphen_parts = filename.split('-')
assert(len(hyphen_parts) >= 3)
dotpart = hyphen_parts[len(hyphen_parts) - 1]
ndots = len(dotpart.split('.'))
dotpart = dotpart[:dotpart.rfind('.')] # Remove the file extension
dotpart = dotpart[:dotpart.rfind('.')] # Remove the arch
if ndots >= 4:
dotpart = dotpart[:dotpart.rfind('.')] # Remove the dist
# The remainder should just be the release.
release = dotpart
# Get the version
version = hyphen_parts[len(hyphen_parts) - 2]
version_parts = version.split('.')
micro = version_parts[2] if len(version_parts) == 3 else None
return (version_parts[0], version_parts[1], micro, release)
def arch_from_rpm_filename(filename):
# name - version - release.dist.arch.rpm
# remove extension
arch = filename[:filename.rfind('.')]
arch = arch[arch.rfind('.') + 1:]
return arch
def distro_from_rpm_filename(filename):
# name - version - release.dist.arch.rpm
distro = filename.split('.')[-3]
return distro
def verkey_rpms(rpm):
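    # Build a fixed-width numeric sort key from (major, minor, micro, release), each part
    # zero-padded to 4 digits, so a plain integer comparison orders rpms by version.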
version = version_from_rpm_filename(rpm)
major = version[0].rjust(4, '0')
minor = version[1].rjust(4, '0')
micro = version[2].rjust(4, '0') if version[2] else '0000'
rel = version[3].rjust(4, '0')
key = '{}{}{}{}'.format(major, minor, micro, rel)
return int(key)
def sort_rpms(rpms):
return sorted(rpms, reverse = True, key = verkey_rpms)
def rpm_is_kmod(filename):
    return filename.startswith(KMOD_PKG_PREFIX) and 'dkms' not in filename and 'open' not in filename
def kmod_belongs_to(kmod_filename, branch):
return branch.version() in kmod_filename
def get_rpm_epoch(rpmfile, repodir):
cmd = ['rpm', '-qp', '--nosignature', '--qf', '%{epochnum}', repodir + rpmfile]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
# Print warnings but try to ignore the one about the key
    if stderr and not stderr.endswith(b'NOKEY\n'):
print(stderr)
return stdout.decode('utf-8')
def rpm_is_pkgname(rpm, pkgname, pkgversion = ''):
"""
checks whether the given rpm filename fits the given package name
"""
rpm_stops = len(rpm.split('-'))
pkg_stops = len(pkgname.split('-'))
if pkgversion == '':
return rpm.startswith(pkgname) and rpm_stops == pkg_stops + 2
else:
return rpm.startswith(pkgname) and pkgversion in rpm and rpm_stops == pkg_stops + 2
def all_rpms_from_pkgname(rpms, pkgname, majorversion):
candidates = [f for f in rpms if rpm_is_pkgname(f, pkgname, majorversion)]
return sort_rpms(candidates) # Sort them anyway, just because
def latest_rpm_from_pkgname(rpms, pkgname, pkgversion = ''):
candidates = [f for f in rpms if rpm_is_pkgname(f, pkgname, pkgversion)]
if len(candidates) == 0: return None
# If a pkgversion is given, we should generally have only one rpm per
    # stream. However, if there are multiple rpm files in the given version
# but with different release numbers, we need to use the latest one, so
# just sort the rpms
candidates = sort_rpms(candidates)
return candidates[0]
def filename_to_nevra(filename, repodir):
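    # Reassemble the full NEVRA string (name-epoch:version-release.dist.arch) used in the
    # modulemd artifacts list; the epoch is queried from the rpm itself since the
    # filename does not carry it.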
epoch = get_rpm_epoch(filename, repodir)
hyphen_parts = filename.split('-')
assert len(hyphen_parts) > 2, "filename not well-formed: %r" % filename
nevra = ''
# Add all parts until the version
for i in range(0, len(hyphen_parts) - 2):
nevra += hyphen_parts[i] + '-'
nevra += epoch
nevra += ':'
nevra += hyphen_parts[len(hyphen_parts) - 2]
last = hyphen_parts[len(hyphen_parts) - 1] # Remove file extension
last = last[:last.rfind('.')]
nevra += '-'
nevra += last
return nevra
if __name__ == '__main__':
repodir = './rpms/'
outfile = ''
if len(sys.argv) > 1:
repodir = sys.argv[1] + '/'
else:
print('Usage: ' + sys.argv[0] + ' [INDIR] [OUTFILE]')
sys.exit()
if len(sys.argv) > 2:
outfile = sys.argv[2]
out = Writer()
now = datetime.datetime.now()
repodir_contents = listdir(repodir)
rpm_files = [f for f in repodir_contents if f.endswith('.rpm') and isfile(join(repodir, f))]
driver_rpms = [n for n in rpm_files if n.startswith(BRANCH_PKGS[0])]
kmod_rpms = [n for n in rpm_files if rpm_is_kmod(n)]
if len(driver_rpms) == 0:
print('Error: No driver rpms (starting with ' + BRANCH_PKGS[0] + ') found.')
sys.exit()
branches = []
# Figure out the branches
driver_rpms = sort_rpms(driver_rpms)
for pkg in driver_rpms:
stops = len(BRANCH_PKGS[0].split('-'))
pkg_stops = len(pkg.split('-'))
if (pkg_stops != stops + 2):
continue
version = version_from_rpm_filename(pkg)
major = version[0]
minor = version[1]
micro = version[2]
n_branches = len(branches)
if n_branches == 0 or (n_branches > 0 and branches[n_branches - 1].major != major):
arch = arch_from_rpm_filename(pkg)
distro = distro_from_rpm_filename(pkg)
branches.append(Branch(major, major, minor, micro, arch))
branches.append(Branch(major + "-dkms", major, minor, micro, arch))
if int(major) > 510:
branches.append(Branch(major + "-open", major, minor, micro, arch))
branches = sorted(branches)
if len(branches) == 0:
print('Error: Could not determine branches from the given rpm files in ' + repodir)
print('RPM files found:')
for p in repodir_contents:
print(' - ' + str(p))
print('Driver rpms:')
for p in driver_rpms:
print(' - ' + str(p))
sys.exit()
# Add 'latest' branch with the same version as the highest-versioned other branch
latest = branches[0]
latest_branch = Branch('latest', latest.major, latest.minor, latest.micro, latest.arch)
branches.insert(0, latest_branch)
print('Latest Branch: ' + latest_branch.version())
latest_dkms_branch = Branch('latest-dkms', latest.major, latest.minor, latest.micro, latest.arch)
branches.insert(1, latest_dkms_branch)
# Add 'open-dkms' branch with the same version as the 'latest-dkms' branch
if int(latest.major) > 510:
open_dkms_branch = Branch('open-dkms', latest.major, latest.minor, latest.micro, latest.arch)
branches.insert(2, open_dkms_branch)
for branch in branches:
print('Branch: ' + branch.name + '(Version: ' + branch.version() + ')')
time_stamp = now.strftime('%Y%m%d%H%M%S')
out.line('document: modulemd')
out.line('version: 2')
out.line('data:')
out.tab().line('name: nvidia-driver')
out.tab().line('stream: ' + branch.name)
out.tab().line('version: ' + time_stamp)
out.tab().line('context: ' + get_stream_hash('nvidia-driver', branch.name, time_stamp, distro))
out.tab().line('arch: ' + branch.arch)
out.tab().line('summary: Nvidia driver for ' + branch.name + ' branch')
out.tab().line('description: >-')
for line in DESCRIPTION:
out.tab().tab().line(line.replace('{version}', branch.version()))
out.tab().line('license:')
out.tab().tab().line('module:')
out.tab().tab().tab().line('- MIT')
out.tab().line('artifacts:')
out.tab().tab().line('rpms:')
existing_branch_pkgs = set()
optional_branch_pkgs = set()
for pkg in BRANCH_PKGS:
latest_pkg = latest_rpm_from_pkgname(rpm_files, pkg, branch.version())
if not latest_pkg:
print('WARNING: No package named ' + pkg + ' in version "' + \
branch.version() + '" found in rpmdir')
for p in all_rpms_from_pkgname(rpm_files, pkg, branch.major):
out.tab().tab().tab().line('- ' + filename_to_nevra(p, repodir))
existing_branch_pkgs.add(pkg)
for opt in OPTIONAL_PKGS:
if opt == "nvidia-kmod-headers" and branch.is_open():
continue
if opt == "nvidia-kmod-source" and not branch.is_open():
continue
for o in all_rpms_from_pkgname(rpm_files, opt, branch.major):
out.tab().tab().tab().line('- ' + filename_to_nevra(o, repodir))
optional_branch_pkgs.add(opt)
for pkg in LATEST_PKGS:
latest_pkg = latest_rpm_from_pkgname(rpm_files, pkg)
if latest_pkg:
out.tab().tab().tab().line('- ' + filename_to_nevra(latest_pkg, repodir))
else:
print('WARNING: No package ' + str(pkg) + ' for branch ' + branch.name + ' found')
if branch.is_open():
open_pkg = latest_rpm_from_pkgname(rpm_files, 'kmod-nvidia-open-dkms', branch.version())
if open_pkg:
out.tab().tab().tab().line('- ' + filename_to_nevra(open_pkg, repodir))
else:
print('WARNING: RPM kmod-nvidia-open-dkms in version ' + branch.version() + ' not found')
elif branch.is_dkms():
dkms_pkg = latest_rpm_from_pkgname(rpm_files, 'kmod-nvidia-latest-dkms', branch.version())
if dkms_pkg:
out.tab().tab().tab().line('- ' + filename_to_nevra(dkms_pkg, repodir))
else:
print('WARNING: RPM kmod-nvidia-latest-dkms in version ' + branch.version() + ' not found')
else:
# All the kmod rpms which belong to this branch
branch_kmod_rpms = list(filter(lambda r: kmod_belongs_to(r, branch), kmod_rpms))
if not branch_kmod_rpms:
print('WARNING: Branch %s in version %s is not a DKMS branch, but no precompiled kmod packages can be found' % (branch.name, branch.version()))
else:
for rpm in branch_kmod_rpms:
out.tab().tab().tab().line('- ' + filename_to_nevra(rpm, repodir))
out.tab().line('profiles:')
out.tab().tab().line('default:')
out.tab().tab().tab().line('description: Default installation')
out.tab().tab().tab().line('rpms:')
for pkg in sorted(existing_branch_pkgs):
out.tab().tab().tab().tab().line('- ' + pkg)
if branch.is_open():
out.tab().tab().tab().tab().line('- kmod-nvidia-open-dkms')
out.tab().tab().line('src:')
out.tab().tab().tab().line('description: Source files for compiling open kernel modules')
out.tab().tab().tab().line('rpms:')
out.tab().tab().tab().tab().line('- nvidia-kmod-source')
elif branch.is_dkms():
out.tab().tab().tab().tab().line('- kmod-nvidia-latest-dkms')
else:
out.tab().tab().line('src:')
out.tab().tab().tab().line('description: Source headers for compilation')
out.tab().tab().tab().line('rpms:')
out.tab().tab().tab().tab().line('- nvidia-kmod-headers')
if branch.arch == "x86_64":
out.tab().tab().line('fm:')
out.tab().tab().tab().line('description: FabricManager installation')
out.tab().tab().tab().line('rpms:')
for pkg in sorted(existing_branch_pkgs):
out.tab().tab().tab().tab().line('- ' + pkg)
if branch.is_open():
out.tab().tab().tab().tab().line('- kmod-nvidia-open-dkms')
elif branch.is_dkms():
out.tab().tab().tab().tab().line('- kmod-nvidia-latest-dkms')
if "latest" in branch.name:
out.tab().tab().tab().tab().line('- ' + 'nvidia-fabric-manager')
out.tab().tab().tab().tab().line('- ' + 'libnvidia-nscq-' + latest.major)
elif int(branch.major) < 460:
out.tab().tab().tab().tab().line('- ' + 'nvidia-fabricmanager-' + branch.major)
out.tab().tab().tab().tab().line('- ' + 'libnvidia-nscq-' + branch.major)
else:
out.tab().tab().tab().tab().line('- ' + 'nvidia-fabric-manager')
out.tab().tab().tab().tab().line('- ' + 'libnvidia-nscq-' + branch.major)
if branch.arch == "aarch64" and int(branch.major) > 470:
out.tab().tab().line('fm:')
out.tab().tab().tab().line('description: FabricManager installation')
out.tab().tab().tab().line('rpms:')
for pkg in sorted(existing_branch_pkgs):
out.tab().tab().tab().tab().line('- ' + pkg)
if branch.is_open():
out.tab().tab().tab().tab().line('- kmod-nvidia-open-dkms')
elif branch.is_dkms():
out.tab().tab().tab().tab().line('- kmod-nvidia-latest-dkms')
out.tab().tab().tab().tab().line('- ' + 'nvidia-fabric-manager')
out.tab().tab().line('ks:')
out.tab().tab().tab().line('description: Installation via kickstart')
out.tab().tab().tab().line('rpms:')
for pkg in sorted(existing_branch_pkgs):
if "cuda-drivers" not in pkg:
out.tab().tab().tab().tab().line('- ' + pkg)
if branch.is_open():
out.tab().tab().tab().tab().line('- kmod-nvidia-open-dkms')
elif branch.is_dkms():
out.tab().tab().tab().tab().line('- kmod-nvidia-latest-dkms')
out.next()
out.line('document: modulemd-defaults')
out.line('version: 1')
out.line('data:')
out.tab().line('module: nvidia-driver')
out.tab().line('stream: latest-dkms')
out.tab().line('profiles:')
for branch in branches:
out.tab().tab().line(branch.name + ': [default]')
out.write(outfile)
# Run modulemd-validator on the output, to catch
# bugs early. Since modifyrepo doesn't do it...
if len(outfile) > 0 and os.path.isfile('/usr/bin/modulemd-validator'):
print('Running modulemd-validator...', end='')
process = subprocess.Popen(['/usr/bin/modulemd-validator', outfile], \
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
if process.returncode != 0:
print('')
print(stdout)
else:
print(' OK')
| cuda-repo-management-main | genmodules.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import netCDF4 as nc
import numpy as np
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from tqdm import tqdm
start_y = 1981
end_y = 2020
for y in range(end_y - start_y + 1):
start_date = datetime(year=start_y+y, month=1, day=1)
end_date = datetime(year=start_y+y, month=12, day=1)
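    # relativedelta(...).months is 11 for a January-to-December span, so add 1 to cover
    # every month of the year.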
n_months = relativedelta(end_date, start_date).months + 1
dates = [start_date + relativedelta(months=x) for x in range(n_months)]
for date in tqdm(dates):
file_prefix = date.strftime('e5.accumulated_tp_1h.%Y%m')
input_filename = f"./raw_1hr_all/{file_prefix}.nc"
output_filename = f"./binary_1hr_all/{file_prefix}.bin"
print(f"Reading {input_filename} and outputting to {output_filename}")
ds = nc.Dataset(input_filename, "r", format="NETCDF4")
precip = np.asarray(ds["tp"])
precip.tofile(output_filename)
| HMM_sample_code-main | miniapp/era5_preicp_nc_to_bin.py |
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
""" Simple holder for a regular expression and a function
to run if that regular expression matches a line.
The function should expect (re.match, file, linenumber) as parameters
"""
def __init__(self, regex, replace_fn):
self.regex = re.compile(regex)
self.replace_fn = replace_fn
def short_replace(match, file, line_number):
"""Replace a Short: ... cobra command description with an internationalization
"""
sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
def import_replace(match, file, line_number):
"""Add an extra import for the i18n library.
Doesn't try to be smart and detect if it's already present, assumes a
    gofmt round will fix things.
"""
sys.stdout.write('{}\n"k8s.io/kubectl/pkg/util/i18n"\n'.format(match.group(1)))
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubectl/pkg/cmd/util")', import_replace)
def string_flag_replace(match, file, line_number):
"""Replace a cmd.Flags().String("...", "", "...") with an internationalization
"""
sys.stdout.write('{}i18n.T("{})"))\n'.format(match.group(1), match.group(2)))
STRING_FLAG_MATCH = MatchHandler('(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
def long_string_replace(match, file, line_number):
return '{}i18n.T({}){}'.format(match.group(1), match.group(2), match.group(3))
LONG_DESC_MATCH = MatchHandler('(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
EXAMPLE_MATCH = MatchHandler('(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
def replace(filename, matchers, multiline_matchers):
"""Given a file and a set of matchers, run those matchers
across the file and replace it with the results.
"""
# Run all the matchers
line_number = 0
for line in fileinput.input(filename, inplace=True):
line_number += 1
matched = False
for matcher in matchers:
match = matcher.regex.match(line)
if match:
matcher.replace_fn(match, filename, line_number)
matched = True
break
if not matched:
sys.stdout.write(line)
sys.stdout.flush()
with open(filename, 'r') as datafile:
content = datafile.read()
for matcher in multiline_matchers:
match = matcher.regex.search(content)
while match:
rep = matcher.replace_fn(match, filename, 0)
# Escape back references in the replacement string
# (And escape for Python)
# (And escape for regex)
rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep)
content = matcher.regex.sub(rep, content, 1)
match = matcher.regex.search(content)
sys.stdout.write(content)
# gofmt the file again
from subprocess import call
call(["goimports", "-w", filename])
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH])
| gpu-operator-master | vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py |
#!/usr/bin/env python3
import docker
import os
import re
import sys
import shutil
import tempfile
import time
def get_expected_labels_regexs():
with open("./expected-output.txt") as f:
expected_labels = f.readlines()
expected_labels = [x.strip() for x in expected_labels]
return [re.compile(label) for label in expected_labels]
def check_labels(expected_labels_regexs, labels):
for label in labels[:]:
for label_regex in expected_labels_regexs[:]:
if label_regex.match(label):
expected_labels_regexs.remove(label_regex)
labels.remove(label)
break
for label in labels:
print("Unexpected label: {}".format(label))
for regex in expected_labels_regexs:
print("Missing label matching regex: {}".format(regex.pattern))
return len(expected_labels_regexs) == 0 and len(labels) == 0
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: {} DOCKER_IMAGE".format(sys.argv[0]))
sys.exit(1)
image = sys.argv[1]
print("Running integration tests for GFD")
client = docker.from_env()
with tempfile.TemporaryDirectory() as tmpdirname:
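        # GFD writes its generated label file under /etc/kubernetes/node-feature-discovery/features.d
        # inside the container; bind-mounting a temporary host directory there lets the test read it.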
mount = docker.types.Mount("/etc/kubernetes/node-feature-discovery/features.d",
tmpdirname, "bind")
print("Running GFD")
container = client.containers.run(image, detach=True, privileged=True, mounts=[mount,])
print("Waiting for GFD output file")
while container.status != "exited" and not os.path.exists(tmpdirname + "/gfd"):
time.sleep(1)
container.reload()
print("Container logs:\n{}".format(container.logs().decode()))
shutil.copyfile(tmpdirname + "/gfd", tmpdirname + "/gfd-copy")
container.stop()
with open(tmpdirname + "/gfd-copy") as output_file:
content = output_file.readlines()
content = [x.strip() for x in content]
expected_labels = get_expected_labels_regexs()
if not check_labels(expected_labels, content):
print("Integration tests failed")
sys.exit(1)
print("Integration tests done")
sys.exit(0)
| gpu-feature-discovery-master | tests/integration-tests.py |
#!/usr/bin/env python3
import re
import sys
import time
import yaml
from kubernetes import client, config, watch
def get_expected_labels_regexs():
with open("./expected-output.txt") as f:
expected_labels = f.readlines()
return [re.compile(label.strip()) for label in expected_labels]
def deploy_yaml_file(core_api, apps_api, rbac_api, daemonset_yaml_file):
with open(daemonset_yaml_file) as f:
bodies = yaml.safe_load_all(f)
for body in bodies:
namespace = body["metadata"].get("namespace", "default")
if body["kind"] == "Namespace":
core_api.create_namespace(body)
elif body["kind"] == "DaemonSet":
apps_api.create_namespaced_daemon_set(namespace, body)
elif body["kind"] == "ServiceAccount":
core_api.create_namespaced_service_account(namespace, body)
elif body["kind"] == "ClusterRole":
rbac_api.create_cluster_role(body)
elif body["kind"] == "ClusterRoleBinding":
rbac_api.create_cluster_role_binding(body)
else:
print("Unknown kind {}".format(body["kind"]), file=sys.stderr)
sys.exit(1)
def check_labels(expected_labels_regexs, labels):
for label in labels[:]:
if label.startswith("feature.node.kubernetes.io/"):
labels.remove(label)
continue
for label_regex in expected_labels_regexs[:]:
if label_regex.match(label):
expected_labels_regexs.remove(label_regex)
labels.remove(label)
break
for label in labels:
print("Unexpected label on node: {}".format(label), file=sys.stderr)
for regex in expected_labels_regexs:
print("Missing label matching regex: {}".format(regex.pattern), file=sys.stderr)
return len(expected_labels_regexs) == 0 and len(labels) == 0
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Usage: {} GFD_YAML_PATH NFD_YAML_PATH".format(sys.argv[0]))
sys.exit(1)
print("Running E2E tests for GFD")
config.load_kube_config()
core_api = client.CoreV1Api()
apps_api = client.AppsV1Api()
rbac_api = client.RbacAuthorizationV1Api()
nodes = core_api.list_node().items
# Should we limit to only one node ?
if len(nodes) < 1:
print("No nodes found", file=sys.stderr)
sys.exit(1)
regexs = get_expected_labels_regexs()
for k, v in nodes[0].metadata.labels.items():
regexs.append(re.compile(k + "=" + v))
print("Deploy NFD and GFD")
deploy_yaml_file(core_api, apps_api, rbac_api, sys.argv[1]) # GFD
deploy_yaml_file(core_api, apps_api, rbac_api, sys.argv[2]) # NFD
timestamp_label_name = "nvidia.com/gfd.timestamp"
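    # GFD stamps each labeling pass with this timestamp label; once it appears on the node,
    # all generated labels should be in place and we can stop watching.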
print("Watching node updates")
stop = False
w = watch.Watch()
for event in w.stream(core_api.list_node, _request_timeout=180):
if event['type'] == 'MODIFIED':
print("Node modified")
for label_name in event['object'].metadata.labels:
if label_name == timestamp_label_name:
stop = True
print("Timestamp label found. Stop watching node")
break
if stop:
break
print("Checking labels")
nodes = core_api.list_node().items
labels = [k + "=" + v for k, v in nodes[0].metadata.labels.items()]
if not check_labels(regexs, labels):
print("E2E tests failed", file=sys.stderr)
sys.exit(1)
print("E2E tests done")
sys.exit(0)
| gpu-feature-discovery-master | tests/e2e-tests.py |
#!/usr/bin/env python3
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import models
# Collect all available model classes
model_names = sorted(el for el in models.__dict__
if not el.startswith("__") and callable(models.__dict__[el]))
"""
Reda, Fitsum A., et al. "Unsupervised Video Interpolation Using Cycle Consistency."
arXiv preprint arXiv:1906.05928 (2019).
Jiang, Huaizu, et al. "Super slomo: High quality estimation of multiple
intermediate frames for video interpolation." arXiv pre-print arXiv:1712.00080 (2017).
"""
parser = argparse.ArgumentParser(description="A PyTorch Implementation of Unsupervised Video Interpolation Using "
"Cycle Consistency")
parser.add_argument('--model', metavar='MODEL', default='HJSuperSloMo',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: HJSuperSloMo)')
parser.add_argument('-s', '--save', '--save_root',
default='./result_folder', type=str,
help='Path of the output folder',
metavar='SAVE_PATH')
parser.add_argument('--torch_home', default='./.torch', type=str,
metavar='TORCH_HOME',
help='Path to save pre-trained models from torchvision')
parser.add_argument('-n', '--name', default='trial_0', type=str, metavar='EXPERIMENT_NAME',
help='Name of experiment folder.')
parser.add_argument('--dataset', default='VideoInterp', type=str, metavar='TRAINING_DATALOADER_CLASS',
help='Specify training dataset class for loading (Default: VideoInterp)')
parser.add_argument('--resume', default='', type=str, metavar='CHECKPOINT_PATH',
help='path to checkpoint file (default: none)')
# Resources
parser.add_argument('--distributed_backend', default='nccl', type=str, metavar='DISTRIBUTED_BACKEND',
help='backend used for communication between processes.')
parser.add_argument('-j', '--workers', default=4, type=int,
                    help='number of data loader workers (default: 4)')
parser.add_argument('-g', '--gpus', type=int, default=-1,
help='number of GPUs to use')
parser.add_argument('--fp16', action='store_true', help='Enable mixed-precision training.')
# Learning rate parameters.
parser.add_argument('--lr', '--learning_rate', default=0.0001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr_scheduler', default='MultiStepLR', type=str,
metavar='LR_Scheduler', help='Scheduler for learning' +
' rate (only ExponentialLR and MultiStepLR supported.')
parser.add_argument('--lr_gamma', default=0.1, type=float,
help='learning rate will be multiplied by this gamma')
parser.add_argument('--lr_step', default=200, type=int,
help='stepsize of changing the learning rate')
parser.add_argument('--lr_milestones', type=int, nargs='+',
default=[250, 450], help="Spatial dimension to " +
"crop training samples for training")
# Gradient.
parser.add_argument('--clip_gradients', default=-1.0, type=float,
help='If positive, clip the gradients by this value.')
# Optimization hyper-parameters
parser.add_argument('-b', '--batch_size', default=4, type=int, metavar='BATCH_SIZE',
help='mini-batch per gpu size (default : 4)')
parser.add_argument('--wd', '--weight_decay', default=0.001, type=float, metavar='WEIGHT_DECAY',
help='weight_decay (default = 0.001)')
parser.add_argument('--seed', default=1234, type=int, metavar="SEED",
help='seed for initializing training. ')
parser.add_argument('--optimizer', default='Adam', type=str, metavar='OPTIMIZER',
help='Specify optimizer from torch.optim (Default: Adam)')
parser.add_argument('--mean_pix', nargs='+', type=float, metavar="RGB_MEAN",
default=[109.93, 109.167, 101.455],
help='mean pixel values carried over from superslomo (default: [109.93, 109.167, 101.455])')
parser.add_argument('--print_freq', default=100, type=int, metavar="PRINT_FREQ",
help='frequency of printing training status (default: 100)')
parser.add_argument('--save_freq', type=int, default=20, metavar="SAVE_FREQ",
help='frequency of saving intermediate models, in epoches (default: 20)')
parser.add_argument('--start_epoch', type=int, default=-1,
help="Set epoch number during resuming")
parser.add_argument('--epochs', default=500, type=int, metavar="EPOCHES",
help='number of total epochs to run (default: 500)')
# Training sequence, supports a single sequence for now
parser.add_argument('--train_file', required=False, metavar="TRAINING_FILE",
help='training file (default : Required)')
parser.add_argument('--crop_size', type=int, nargs='+', default=[704, 704], metavar="CROP_SIZE",
help="Spatial dimension to crop training samples for training (default : [704, 704])")
parser.add_argument('--train_n_batches', default=-1, type=int, metavar="TRAIN_N_BATCHES",
help="Limit the number of minibatch iterations per epoch. Used for debugging purposes. \
                        (default: -1, means use all available mini-batches)")
parser.add_argument('--sample_rate', type=int, default=1,
help='number of frames to skip when sampling input1, {intermediate}, and input2 \
(default=1, ie. we treat consecutive frames for input1 and intermediate, and input2 frames.)')
parser.add_argument('--step_size', type=int, default=-1, metavar="STEP_INTERP",
help='number of frames to skip from one mini-batch to the next mini-batch \
                        (default: -1, means step_size = num_interp + 1)')
parser.add_argument('--num_interp', default=7, type=int, metavar="NUM_INTERP",
help='number intermediate frames to interpolate (default : 7)')
# Validation sequence, supports a single sequence for now
parser.add_argument('--val_file', metavar="VALIDATION_FILE",
help='validation file (default : None)')
parser.add_argument('--val_batch_size', type=int, default=1,
help="Batch size to use for validation.")
parser.add_argument('--val_n_batches', default=-1, type=int,
help="Limit the number of minibatch iterations per epoch. Used for debugging purposes.")
parser.add_argument('--video_fps', type=int, default=30,
help="Render predicted video with a specified frame rate")
parser.add_argument('--initial_eval', action='store_true', help='Perform initial evaluation before training.')
parser.add_argument("--start_index", type=int, default=0, metavar="VAL_START_INDEX",
help="Index to start running validation (default : 0)")
parser.add_argument("--val_sample_rate", type=int, default=1, metavar="VAL_START_INDEX",
help='number of frames to skip when sampling input1, {intermediate}, and input2 (default=1, \
ie. we treat consecutive frames for input1 and intermediate, and input2 frames.)')
parser.add_argument('--val_step_size', type=int, default=-1, metavar="VAL_STEP_INTERP",
help='number of frames to skip from one mini-batch to the next mini-batch \
                        (default: -1, means step_size = num_interp + 1)')
parser.add_argument('--val_num_interp', type=int, default=1,
help='number of intermediate frames we want to interpolate for validation. (default: 1)')
# Misc: undersample large sequences (--step_size), compute flow after downscale (--flow_scale)
parser.add_argument('--flow_scale', type=float, default=1.,
help="Flow scale (default: 1.) for robust interpolation in high resolution images.")
parser.add_argument('--skip_aug', action='store_true', help='Skips expensive geometric or photometric augmentations.')
parser.add_argument('--teacher_weight', type=float, default=-1.,
help="Teacher or Pseudo Supervised Loss (PSL)'s weight of contribution to total loss.")
parser.add_argument('--apply_vidflag', action='store_true', help='Apply the BRG flag to interpolated frames.')
parser.add_argument('--write_video', action='store_true', help='save video to \'args.save/args.name.mp4\'.')
parser.add_argument('--write_images', action='store_true',
help='write to folder \'args.save/args.name\' prediction and ground-truth images.')
parser.add_argument('--stride', type=int, default=64,
help='the largest factor a model reduces spatial size of inputs during a forward pass.')
parser.add_argument('--post_fix', default='Proposed', type=str,
                    help='tag for predicted frames (default: \'Proposed\')')
# Required for torch distributed launch
parser.add_argument('--local_rank', default=None, type=int,
help='Torch Distributed')
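# Illustrative invocation (a sketch only; the dataset paths below are placeholders, not files
# shipped with this repository). It shows roughly how the flags defined above combine for training:
#
#   python3 train.py --model HJSuperSloMo \
#       --train_file /path/to/low_fps_frames --val_file /path/to/val_frames \
#       --name trial_0 --save ./result_folder \
#       --num_interp 7 --batch_size 4 --epochs 500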
| unsupervised-video-interpolation-master | parser.py |
import sys
import os
import subprocess
import time
from inspect import isclass
import numpy as np
class TimerBlock:
def __init__(self, title):
print(("{}".format(title)))
def __enter__(self):
        # time.clock() was removed in Python 3.8; perf_counter() gives wall-clock timing
        self.start = time.perf_counter()
return self
def __exit__(self, exc_type, exc_value, traceback):
        self.end = time.perf_counter()
self.interval = self.end - self.start
if exc_type is not None:
self.log("Operation failed\n")
else:
self.log("Operation finished\n")
def log(self, string):
        duration = time.perf_counter() - self.start
units = 's'
if duration > 60:
duration = duration / 60.
units = 'm'
print(" [{:.3f}{}] {}".format(duration, units, string), flush=True)
def module_to_dict(module, exclude=[]):
return dict([(x, getattr(module, x)) for x in dir(module)
if isclass(getattr(module, x))
and x not in exclude
and getattr(module, x) not in exclude])
# AverageMeter: adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
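# Usage sketch (names are illustrative): track a running, count-weighted training loss.
#
#   loss_meter = AverageMeter()
#   loss_meter.update(loss.item(), n=batch_size)
#   print(loss_meter.avg)   # running average over all updates so far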
# create_pipe: adapted from https://stackoverflow.com/questions/23709893/popen-write-operation-on-closed-file-images-to-video-using-ffmpeg/23709937#23709937
# start an ffmpeg pipe for creating RGB8 for color images or FFV1 for depth
# NOTE: this is REALLY lossy and not optimal for HDR data. when it comes time to train
# on HDR data, you'll need to figure out the way to save to pix_fmt=rgb48 or something
# similar
def create_pipe(pipe_filename, width, height, frame_rate=60, quiet=True):
# default extension and tonemapper
pix_fmt = 'rgb24'
out_fmt = 'yuv420p'
codec = 'h264'
command = ['ffmpeg',
'-threads', '2', # number of threads to start
'-y', # (optional) overwrite output file if it exists
'-f', 'rawvideo', # input format
'-vcodec', 'rawvideo', # input codec
'-s', str(width) + 'x' + str(height), # size of one frame
'-pix_fmt', pix_fmt, # input pixel format
'-r', str(frame_rate), # frames per second
               '-i', '-',  # The input comes from a pipe
'-an', # Tells FFMPEG not to expect any audio
'-codec:v', codec, # output codec
'-crf', '18',
# compression quality for h264 (maybe h265 too?) - http://slhck.info/video/2017/02/24/crf-guide.html
# '-compression_level', '10', # compression level for libjpeg if doing lossy depth
               '-strict', '-2',  # experimental 16 bit support necessary for gray16le
'-pix_fmt', out_fmt, # output pixel format
'-s', str(width) + 'x' + str(height), # output size
pipe_filename]
cmd = ' '.join(command)
    if not quiet:
        print('opening a pipe ....\n' + cmd + '\n')
# open the pipe, and ignore stdout and stderr output
DEVNULL = open(os.devnull, 'wb')
return subprocess.Popen(command, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=DEVNULL, close_fds=True)
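# Usage sketch (matches how eval.py drives the pipe; the filename and frame array are assumptions):
#
#   pipe = create_pipe('out.mp4', width=1920, height=1080, frame_rate=30)
#   pipe.stdin.write(frame.tobytes())   # frame: HxWx3 uint8 RGB numpy array
#   pipe.stdin.close()
#   pipe.wait()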
def get_pred_flag(height, width):
pred_flag = np.ones((height, width, 3), dtype=np.uint8)
pred_values = np.zeros((height, width, 3), dtype=np.uint8)
hstart = int((192. / 1200) * height)
wstart = int((224. / 1920) * width)
h_step = int((24. / 1200) * height)
w_step = int((32. / 1920) * width)
pred_flag[hstart:hstart + h_step, -wstart + 0 * w_step:-wstart + 1 * w_step, :] = np.asarray([0, 0, 0])
pred_flag[hstart:hstart + h_step, -wstart + 1 * w_step:-wstart + 2 * w_step, :] = np.asarray([0, 0, 0])
pred_flag[hstart:hstart + h_step, -wstart + 2 * w_step:-wstart + 3 * w_step, :] = np.asarray([0, 0, 0])
pred_values[hstart:hstart + h_step, -wstart + 0 * w_step:-wstart + 1 * w_step, :] = np.asarray([0, 0, 255])
pred_values[hstart:hstart + h_step, -wstart + 1 * w_step:-wstart + 2 * w_step, :] = np.asarray([0, 255, 0])
pred_values[hstart:hstart + h_step, -wstart + 2 * w_step:-wstart + 3 * w_step, :] = np.asarray([255, 0, 0])
return pred_flag, pred_values
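# Note: pred_flag is a per-pixel keep-mask (ones everywhere except a small three-square region
# near the top-right corner) and pred_values holds the colors for that region, so eval.py can
# stamp predicted frames with `pred_image * pred_flag + pred_values` to mark them in rendered videos.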
def copy_arguments(main_dict, main_filepath='', save_dir='./'):
pycmd = 'python3 ' + main_filepath + ' \\\n'
_main_dict = main_dict.copy()
_main_dict['--name'] = _main_dict['--name']+'_replicate'
for k in _main_dict.keys():
if 'batchNorm' in k:
pycmd += ' ' + k + ' ' + str(_main_dict[k]) + ' \\\n'
elif type(_main_dict[k]) == bool and _main_dict[k]:
pycmd += ' ' + k + ' \\\n'
elif type(_main_dict[k]) == list:
pycmd += ' ' + k + ' ' + ' '.join([str(f) for f in _main_dict[k]]) + ' \\\n'
elif type(_main_dict[k]) != bool:
pycmd += ' ' + k + ' ' + str(_main_dict[k]) + ' \\\n'
pycmd = '#!/bin/bash\n' + pycmd[:-2]
job_script = os.path.join(save_dir, 'job.sh')
file = open(job_script, 'w')
file.write(pycmd)
file.close()
return
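# Usage sketch (argument values are placeholders): given the non-default CLI flags collected in
# train.py, copy_arguments writes a replication script to <save_dir>/job.sh, e.g.
#
#   copy_arguments({'--name': 'trial_0', '--train_file': '/path/to/frames'},
#                  main_filepath='train.py', save_dir='./result_folder')
#   # ./result_folder/job.sh then contains roughly:
#   #   #!/bin/bash
#   #   python3 train.py \
#   #    --name trial_0_replicate \
#   #    --train_file /path/to/frames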
def block_print():
sys.stdout = open(os.devnull, 'w')
| unsupervised-video-interpolation-master | utils.py |
#!/usr/bin/env python3
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import random
import math
import numpy as np
import torch.backends.cudnn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from tensorboardX import SummaryWriter
from tqdm import tqdm
tqdm.monitor_interval = 0
import datasets
import models
import utils
from parser import parser
from eval import evaluate
from datasets import data_transforms
# Import apex's distributed module.
try:
from apex.parallel import DistributedDataParallel
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
from apex import amp
"""
Reda, Fitsum A., et al. "Unsupervised Video Interpolation Using Cycle Consistency."
arXiv preprint arXiv:1906.05928 (2019).
Jiang, Huaizu, et al. "Super slomo: High quality estimation of multiple
intermediate frames for video interpolation." arXiv pre-print arXiv:1712.00080 (2017).
"""
def parse_and_set_args(block):
args = parser.parse_args()
torch.backends.cudnn.benchmark = True
block.log("Enabling torch.backends.cudnn.benchmark")
if args.resume != '':
block.log("Setting initial eval to true since checkpoint is provided")
args.initial_eval = True
args.rank = int(os.getenv('RANK', 0))
args.world_size = int(os.getenv("WORLD_SIZE", 1))
    if args.local_rank is not None:
args.rank = args.local_rank
if args.local_rank is not None and args.local_rank != 0:
utils.block_print()
block.log("Creating save directory: {}".format(
os.path.join(args.save, args.name)))
args.save_root = os.path.join(args.save, args.name)
os.makedirs(args.save_root, exist_ok=True)
assert os.path.exists(args.save_root)
# temporary directory for torch pre-trained models
os.makedirs(args.torch_home, exist_ok=True)
os.environ['TORCH_HOME'] = args.torch_home
defaults, input_arguments = {}, {}
for key in vars(args):
defaults[key] = parser.get_default(key)
for argument, value in sorted(vars(args).items()):
if value != defaults[argument] and argument in vars(parser.parse_args()).keys():
input_arguments['--' + str(argument)] = value
block.log('{}: {}'.format(argument, value))
if args.rank == 0:
utils.copy_arguments(input_arguments, os.path.realpath(__file__),
args.save_root)
args.network_class = utils.module_to_dict(models)[args.model]
args.optimizer_class = utils.module_to_dict(torch.optim)[args.optimizer]
args.dataset_class = utils.module_to_dict(datasets)[args.dataset]
return args
def initialize_distributed(args):
# Manually set the device ids.
torch.cuda.set_device(args.rank % torch.cuda.device_count())
# Call the init process
if args.world_size > 1:
init_method = 'env://'
torch.distributed.init_process_group(
backend=args.distributed_backend,
world_size=args.world_size, rank=args.rank,
init_method=init_method)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def get_train_and_valid_data_loaders(block, args):
transform = data_transforms.Compose([
# geometric augmentation
data_transforms.NumpyToPILImage(),
data_transforms.RandomTranslate2D(max_displ_factor=0.05),
data_transforms.RandomRotate2D(base_angle=17, delta_angle=5),
data_transforms.RandomScaledCrop2D(crop_height=args.crop_size[0],
crop_width=args.crop_size[1], min_crop_ratio=0.8),
data_transforms.RandomVerticalFlip(prob=0.5),
data_transforms.RandomHorizontalFlip(prob=0.5),
data_transforms.PILImageToNumpy(),
# photometric augmentation
data_transforms.RandomGamma(gamma_low=0.9, gamma_high=1.1),
data_transforms.RandomBrightness(brightness_factor=0.1),
data_transforms.RandomColorOrder(prob=0.5),
data_transforms.RandomContrast(contrast_low=-0.1, contrast_high=0.1),
data_transforms.RandomSaturation(saturation_low=-0.1, saturation_high=0.1)
])
if args.skip_aug:
transform = data_transforms.Compose([
# geometric augmentation
data_transforms.NumpyToPILImage(),
data_transforms.RandomCrop2D(crop_height=args.crop_size[0],
crop_width=args.crop_size[1]),
data_transforms.RandomVerticalFlip(prob=0.5),
data_transforms.RandomHorizontalFlip(prob=0.5),
data_transforms.PILImageToNumpy()
])
# training dataloader
tkwargs = {'batch_size': args.batch_size,
'num_workers': args.workers,
'pin_memory': True, 'drop_last': True}
step_size = args.step_size if args.step_size > 0 else (args.num_interp + 1)
train_dataset = args.dataset_class(args=args, root=args.train_file, num_interp=args.num_interp,
sample_rate=args.sample_rate, step_size=step_size, is_training=True,
transform=transform)
if args.world_size > 1:
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, sampler=train_sampler,
shuffle=(train_sampler is None), **tkwargs)
block.log('Number of Training Images: {}:({} mini-batches)'.format(
step_size * len(train_loader.dataset), len(train_loader)))
# validation dataloader
vkwargs = {'batch_size': args.val_batch_size,
'num_workers': args.workers,
'pin_memory': True, 'drop_last': True}
step_size = args.val_step_size if args.val_step_size > 0 else (args.val_num_interp + 1)
val_dataset = args.dataset_class(args=args, root=args.val_file, num_interp=args.val_num_interp,
sample_rate=args.val_sample_rate, step_size=step_size)
val_loader = torch.utils.data.DataLoader(
val_dataset, shuffle=False, **vkwargs)
block.log('Number of Validation Images: {}:({} mini-batches)'.format(
step_size * len(val_loader.dataset), len(val_loader)))
args.val_size = val_loader.dataset[0]['image'][0].shape[:2]
return train_loader, train_sampler, val_loader
def load_model(model, optimizer, block, args):
# trained weights
checkpoint = torch.load(args.resume, map_location='cpu')
# used for partial initialization
input_dict = checkpoint['state_dict']
curr_dict = model.state_dict()
state_dict = input_dict.copy()
for key in input_dict:
if key not in curr_dict:
print(key)
continue
if curr_dict[key].shape != input_dict[key].shape:
state_dict.pop(key)
print("key {} skipped because of size mismatch.".format(
key))
model.load_state_dict(state_dict, strict=False)
if 'optimizer' in checkpoint and args.start_epoch < 0:
optimizer.load_state_dict(checkpoint['optimizer'])
if args.start_epoch < 0:
args.start_epoch = max(0, checkpoint['epoch'])
block.log("Successfully loaded checkpoint (at epoch {})".format(
checkpoint['epoch']))
def build_and_initialize_model_and_optimizer(block, args):
model = args.network_class(args)
block.log('Number of parameters: {val:,}'.format(val=
sum([p.data.nelement()
if p.requires_grad else 0 for p in model.parameters()])))
block.log('Initializing CUDA')
    assert torch.cuda.is_available(), 'only GPUs are supported at the moment'
model.cuda(torch.cuda.current_device())
optimizer = args.optimizer_class(
[p for p in model.parameters() if p.requires_grad], lr=args.lr)
block.log("Attempting to Load checkpoint '{}'".format(args.resume))
if args.resume and os.path.isfile(args.resume):
load_model(model, optimizer, block, args)
elif args.resume:
block.log("No checkpoint found at '{}'".format(args.resume))
exit(1)
else:
block.log("Random initialization, checkpoint not provided.")
args.start_epoch = 0
if args.fp16:
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
# Run multi-process when it is needed.
if args.world_size > 1:
model = DistributedDataParallel(model)
return model, optimizer
def get_learning_rate_scheduler(optimizer, block, args):
    block.log('Base learning rate {}.'.format(args.lr))
if args.lr_scheduler == 'ExponentialLR':
block.log('Using exponential decay learning rate scheduler with '
'{} decay rate'.format(args.lr_gamma))
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,
args.lr_gamma)
elif args.lr_scheduler == 'MultiStepLR':
block.log('Using multi-step learning rate scheduler with {} gamma '
'and {} milestones.'.format(args.lr_gamma,
args.lr_milestones))
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=args.lr_milestones, gamma=args.lr_gamma)
elif args.lr_scheduler == 'PolyLR':
block.log('Using polynomial decay learning rate scheduler with {} gamma '
'and {} milestones.'.format(args.lr_gamma,
args.lr_milestones))
lr_gamma = math.log(0.1) / math.log(1 - (args.lr_milestones[0] - 1e-6) / args.epochs)
# Poly with lr_gamma until args.lr_milestones[0], then stepLR with factor of 0.1
lambda_map = lambda epoch_index: math.pow(1 - epoch_index / args.epochs, lr_gamma) \
if np.searchsorted(args.lr_milestones, epoch_index + 1) == 0 \
else math.pow(10, -1 * np.searchsorted(args.lr_milestones, epoch_index + 1))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_map)
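        # Worked example (illustrative numbers): with epochs=500 and lr_milestones=[250, 450],
        # lr_gamma ~= 3.32, so the multiplier follows (1 - epoch/500)**3.32 (about 0.48 at epoch 100),
        # reaches ~0.1 at epoch 250, then is held at 1e-1 until epoch 450 and at 1e-2 afterwards.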
else:
raise NameError('Unknown {} learning rate scheduler'.format(
args.lr_scheduler))
return lr_scheduler
def forward_only(inputs_gpu, targets_gpu, model):
# Forward pass.
losses, outputs, targets = model(inputs_gpu, targets_gpu)
# Loss.
for k in losses:
losses[k] = losses[k].mean(dim=0)
loss = losses['tot']
return loss, outputs, targets
def calc_linf_grad_norm(args, parameters):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = max(p.grad.data.abs().max() for p in parameters)
max_norm_reduced = torch.cuda.FloatTensor([max_norm])
if args.world_size > 1:
torch.distributed.all_reduce(max_norm_reduced,
op=torch.distributed.ReduceOp.MAX)
return max_norm_reduced[0].item()
def train_step(batch_cpu, model, optimizer, block, args, print_linf_grad=False):
# Move data to GPU.
inputs = {k: [b.cuda() for b in batch_cpu[k]]
for k in batch_cpu if k in ['image', 'fwd_mvec', 'bwd_mvec', 'depth']}
tar_index = batch_cpu['tindex'].cuda()
# Forward pass.
loss, outputs, targets = forward_only(inputs, tar_index, model)
# Backward and SGP steps.
optimizer.zero_grad()
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Calculate and print norm infinity of the gradients.
if print_linf_grad:
block.log('gradients Linf: {:0.3f}'.format(calc_linf_grad_norm(args,
model.parameters())))
# Clip gradients by value.
if args.clip_gradients > 0:
torch.nn.utils.clip_grad_value_(model.parameters(), args.clip_gradients)
optimizer.step()
return loss, outputs, targets
def evaluate_epoch(model, val_loader, block, args, epoch=0):
    # The number of frames to interpolate may differ between training and validation.
if args.val_num_interp != args.num_interp:
model_ = model
if args.world_size > 1:
model_ = model.module
model_.tlinespace = torch.linspace(
0, 1, 2 + args.val_num_interp).float().cuda()
# calculate validation loss, create videos, or dump predicted frames
v_psnr, v_ssim, v_ie, loss_values = evaluate(args, val_loader, model, args.val_num_interp, epoch, block)
if args.val_num_interp != args.num_interp:
model_ = model
if args.world_size > 1:
model_ = model.module
model_.tlinespace = torch.linspace(0, 1,
2 + args.num_interp).float().cuda()
# Move back the model to train mode.
model.train()
return v_psnr, v_ssim, v_ie, loss_values
def write_summary(global_index, learning_rate, t_loss,
v_loss, v_psnr, v_ssim, v_ie, args):
# Write to tensorboard.
if args.rank == 0:
args.logger.add_scalar("lr", learning_rate, global_index)
args.logger.add_scalars("Loss",
{'trainLoss': t_loss, 'valLoss': v_loss},
global_index)
args.logger.add_scalar("PSNR", v_psnr, global_index)
args.logger.add_scalar("SSIM", v_ssim, global_index)
args.logger.add_scalar("RMS", v_ie, global_index)
def train_epoch(epoch, args, model, optimizer, lr_scheduler,
train_sampler, train_loader,
v_psnr, v_ssim, v_ie, v_loss, block):
# Average loss calculator.
loss_values = utils.AverageMeter()
# Advance Learning rate.
lr_scheduler.step()
# This will ensure the data is shuffled each epoch.
if train_sampler is not None:
train_sampler.set_epoch(epoch)
# Get number of batches in one epoch.
num_batches = len(train_loader) if args.train_n_batches < 0 \
else args.train_n_batches
global_index = 0
for i, batch in enumerate(train_loader):
# Set global index.
global_index = epoch * num_batches + i
# Move one step.
loss, outputs, _ = train_step(
batch, model, optimizer, block, args,
((global_index + 1) % args.print_freq == 0))
# Update the loss accumulator.
loss_values.update(loss.data.item(), outputs.size(0))
# Summary writer.
if (global_index + 1) % args.print_freq == 0:
# Reduce the loss.
if args.world_size > 1:
t_loss_gpu = torch.Tensor([loss_values.val]).cuda()
torch.distributed.all_reduce(t_loss_gpu)
t_loss = t_loss_gpu.item() / args.world_size
else:
t_loss = loss_values.val
# Write to tensorboard.
write_summary(global_index, lr_scheduler.get_lr()[0], t_loss,
v_loss, v_psnr, v_ssim, v_ie, args)
# And reset the loss accumulator.
loss_values.reset()
# Print some output.
dict2print = {'iter': global_index,
'epoch': str(epoch) + '/' + str(args.epochs),
'batch': str(i + 1) + '/' + str(num_batches)}
str2print = ' '.join(key + " : " + str(dict2print[key])
for key in dict2print)
str2print += ' trainLoss:' + ' %1.3f' % t_loss
            str2print += ' valLoss:' + ' %1.3f' % v_loss
            str2print += ' valPSNR:' + ' %1.3f' % v_psnr
str2print += ' lr:' + ' %1.6f' % (lr_scheduler.get_lr()[0])
block.log(str2print)
# Break the training loop if we have reached the maximum number of batches.
if (i + 1) >= num_batches:
break
return global_index
def save_model(model, optimizer, epoch, global_index, max_psnr, block, args):
# Write on rank zero only
if args.rank == 0:
if args.world_size > 1:
model_ = model.module
else:
model_ = model
state_dict = model_.state_dict()
tmp_keys = state_dict.copy()
        # Drop parameters whose names match any of the model's ignore_keys.
        for k in state_dict:
            for ikey in model_.ignore_keys:
                if k in tmp_keys and ikey in k:
                    tmp_keys.pop(k)
state_dict = tmp_keys.copy()
# save checkpoint
model_optim_state = {'epoch': epoch,
'arch': args.model,
'state_dict': state_dict,
'optimizer': optimizer.state_dict(),
}
model_name = os.path.join(
args.save_root, '_ckpt_epoch_%03d_iter_%07d_psnr_%1.2f.pt.tar' % (
epoch, global_index, max_psnr))
torch.save(model_optim_state, model_name)
block.log('saved model {}'.format(model_name))
return model_name
def train(model, optimizer, lr_scheduler, train_loader,
train_sampler, val_loader, block, args):
# Set the model to train mode.
model.train()
# Keep track of maximum PSNR.
max_psnr = -1
# Perform an initial evaluation.
if args.initial_eval:
block.log('Initial evaluation.')
v_psnr, v_ssim, v_ie, v_loss = evaluate_epoch(model, val_loader, block, args, args.start_epoch)
else:
v_psnr, v_ssim, v_ie, v_loss = 20.0, 0.5, 15.0, 0.0
for epoch in range(args.start_epoch, args.epochs):
# Train for an epoch.
global_index = train_epoch(epoch, args, model, optimizer, lr_scheduler,
train_sampler, train_loader, v_psnr, v_ssim, v_ie, v_loss, block)
if (epoch + 1) % args.save_freq == 0:
v_psnr, v_ssim, v_ie, v_loss = evaluate_epoch(model, val_loader, block, args, epoch + 1)
if v_psnr > max_psnr:
max_psnr = v_psnr
save_model(model, optimizer, epoch + 1, global_index,
max_psnr, block, args)
return 0
def main():
# Parse the args.
with utils.TimerBlock("\nParsing Arguments") as block:
args = parse_and_set_args(block)
# Initialize torch.distributed.
with utils.TimerBlock("Initializing Distributed"):
initialize_distributed(args)
# Set all random seed for reproducibility.
with utils.TimerBlock("Setting Random Seed"):
set_random_seed(args.seed)
# Train and validation data loaders.
with utils.TimerBlock("Building {} Dataset".format(args.dataset)) as block:
train_loader, train_sampler, val_loader = get_train_and_valid_data_loaders(block, args)
# Build the model and optimizer.
with utils.TimerBlock("Building {} Model and {} Optimizer".format(
args.model, args.optimizer_class.__name__)) as block:
model, optimizer = build_and_initialize_model_and_optimizer(block, args)
# Learning rate scheduler.
with utils.TimerBlock("Building {} Learning Rate Scheduler".format(
args.optimizer)) as block:
lr_scheduler = get_learning_rate_scheduler(optimizer, block, args)
# Set the tf writer on rank 0.
with utils.TimerBlock("Creating Tensorboard Writers"):
if args.rank == 0:
args.logger = SummaryWriter(log_dir=args.save_root)
with utils.TimerBlock("Training Model") as block:
train(model, optimizer, lr_scheduler, train_loader,
train_sampler, val_loader, block, args)
return 0
if __name__ == '__main__':
main()
| unsupervised-video-interpolation-master | train.py |
#!/usr/bin/env python3
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import sys
import shutil
import natsort
import numpy as np
from glob import glob
from imageio import imsave
from skimage.measure import compare_psnr, compare_ssim
from tqdm import tqdm
tqdm.monitor_interval = 0
import torch
import torch.backends.cudnn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from parser import parser
import datasets
import models
import utils
"""
Reda, Fitsum A., et al. "Unsupervised Video Interpolation Using Cycle Consistency."
arXiv preprint arXiv:1906.05928 (2019).
Jiang, Huaizu, et al. "Super slomo: High quality estimation of multiple
intermediate frames for video interpolation." arXiv pre-print arXiv:1712.00080 (2017).
"""
def main():
with utils.TimerBlock("\nParsing Arguments") as block:
args = parser.parse_args()
args.rank = int(os.getenv('RANK', 0))
block.log("Creating save directory: {}".format(args.save))
args.save_root = os.path.join(args.save, args.name)
if args.write_images or args.write_video:
os.makedirs(args.save_root, exist_ok=True)
assert os.path.exists(args.save_root)
else:
os.makedirs(args.save, exist_ok=True)
assert os.path.exists(args.save)
os.makedirs(args.torch_home, exist_ok=True)
os.environ['TORCH_HOME'] = args.torch_home
args.gpus = torch.cuda.device_count() if args.gpus < 0 else args.gpus
block.log('Number of gpus: {} | {}'.format(args.gpus, list(range(args.gpus))))
args.network_class = utils.module_to_dict(models)[args.model]
args.dataset_class = utils.module_to_dict(datasets)[args.dataset]
block.log('save_root: {}'.format(args.save_root))
block.log('val_file: {}'.format(args.val_file))
with utils.TimerBlock("Building {} Dataset".format(args.dataset)) as block:
vkwargs = {'batch_size': args.gpus * args.val_batch_size,
'num_workers': args.gpus * args.workers,
'pin_memory': True, 'drop_last': True}
step_size = args.val_step_size if args.val_step_size > 0 else (args.num_interp + 1)
val_dataset = args.dataset_class(args=args, root=args.val_file, num_interp=args.num_interp,
sample_rate=args.val_sample_rate, step_size=step_size)
val_loader = torch.utils.data.DataLoader(val_dataset, shuffle=False,
**vkwargs)
args.folder_list = natsort.natsorted(
[os.path.basename(f) for f in sorted(glob(os.path.join(args.val_file, '*')))])
block.log('Number of Validation Images: {}:({} mini-batches)'.format(len(val_loader.dataset), len(val_loader)))
with utils.TimerBlock("Building {} Model".format(args.model)) as block:
model = args.network_class(args)
block.log('Number of parameters: {val:,}'.format(val=
sum([p.data.nelement() if p.requires_grad else 0 for p in model.parameters()])))
block.log('Initializing CUDA')
assert torch.cuda.is_available(), 'Code supported for GPUs only at the moment'
model = model.cuda()
model = torch.nn.DataParallel(model, device_ids=list(range(args.gpus)))
torch.manual_seed(args.seed)
block.log("Attempting to Load checkpoint '{}'".format(args.resume))
if args.resume and os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
# Partial initialization
input_dict = checkpoint['state_dict']
curr_dict = model.module.state_dict()
state_dict = input_dict.copy()
for key in input_dict:
if key not in curr_dict:
continue
if curr_dict[key].shape != input_dict[key].shape:
state_dict.pop(key)
print("key {} skipped because of size mismatch.".format(key))
model.module.load_state_dict(state_dict, strict=False)
epoch = checkpoint['epoch']
block.log("Successfully loaded checkpoint (at epoch {})".format(epoch))
elif args.resume:
block.log("No checkpoint found at '{}'.\nAborted.".format(args.resume))
sys.exit(0)
else:
block.log("Random initialization, checkpoint not provided.")
with utils.TimerBlock("Inference started ") as block:
evaluate(args, val_loader, model, args.num_interp, epoch, block)
def evaluate(args, val_loader, model, num_interp, epoch, block):
in_height, in_width = val_loader.dataset[0]['ishape']
pred_flag, pred_values = utils.get_pred_flag(in_height, in_width)
if not args.apply_vidflag:
pred_flag = 0 * pred_flag + 1
pred_values = 0 * pred_values
if args.rank == 0 and args.write_video:
video_file = os.path.join(args.save_root, '__epoch_%03d.mp4' % epoch)
_pipe = utils.create_pipe(video_file, in_width, in_height, frame_rate=args.video_fps)
model.eval()
loss_values = utils.AverageMeter()
avg_metrics = np.zeros((0, 3), dtype=float)
num_batches = len(val_loader) if args.val_n_batches < 0 else args.val_n_batches
with torch.no_grad():
for i, batch in enumerate(tqdm(val_loader, total=num_batches)):
inputs = [b.cuda() for b in batch['image']]
input_images = [inputs[0], inputs[len(inputs) // 2], inputs[-1]]
inputs_dict = {'image': input_images}
target_images = inputs[1:-1]
tar_indices = batch['tindex'].cuda()
# compute loss at mid-way
tar_indices[:] = (num_interp + 1) // 2
loss, outputs, _ = model(inputs_dict, tar_indices)
loss_values.update(loss['tot'].data.item(), outputs.size(0))
# compute output for each intermediate timepoint
output_image = inputs[0]
for tarIndex in range(1, num_interp + 1):
tar_indices[:] = tarIndex
_, outputs, _ = model(inputs_dict, tar_indices)
output_image = torch.cat((output_image, outputs), dim=1)
output_image = torch.split(output_image, 3, dim=1)[1:]
batch_size, _, _, _ = inputs[0].shape
input_filenames = batch['input_files'][1:-1]
in_height, in_width = batch['ishape']
for b in range(batch_size):
first_target = (input_images[0][b].data.cpu().numpy().transpose(1, 2, 0)).astype(np.uint8)
first_target = first_target[:in_height, :in_width, :]
second_target = (input_images[-1][b].data.cpu().numpy().transpose(1, 2, 0)).astype(np.uint8)
second_target = second_target[:in_height, :in_width, :]
gt_image = first_target
for index in range(num_interp):
pred_image = (output_image[index][b].data.cpu().numpy().transpose(1, 2, 0)).astype(np.uint8)
pred_image = pred_image[:in_height, :in_width, :]
# if ground-truth not loaded, treat low FPS frames as targets
if index < len(target_images):
gt_image = (target_images[index][b].data.cpu().numpy().transpose(1, 2, 0)).astype(np.uint8)
gt_filename = '/'.join(input_filenames[index][b].split(os.sep)[-2:])
gt_image = gt_image[:in_height, :in_width, :]
# calculate metrics using skimage
psnr = compare_psnr(pred_image, gt_image)
ssim = compare_ssim(pred_image, gt_image, multichannel=True, gaussian_weights=True)
err = pred_image.astype(np.float32) - gt_image.astype(np.float32)
ie = np.mean(np.sqrt(np.sum(err * err, axis=2)))
avg_metrics = np.vstack((avg_metrics, np.array([psnr, ssim, ie])))
# write_images
if args.write_images:
tmp_filename = os.path.join(args.save_root, "%s-%02d-%s.png" % (gt_filename[:-4], (index + 1), args.post_fix))
os.makedirs(os.path.dirname(tmp_filename), exist_ok=True)
imsave(tmp_filename, pred_image)
# write video
if args.rank == 0 and args.write_video:
if index == 0:
_pipe.stdin.write(first_target.tobytes())
try:
_pipe.stdin.write((pred_image * pred_flag + pred_values).tobytes())
except AttributeError:
raise AttributeError("Error in ffmpeg video creation. Inconsistent image size.")
if args.write_images:
tmp_filename = os.path.join(args.save_root, "%s-%02d-%s.png" % (gt_filename[:-4], 0, "ground_truth"))
os.makedirs(os.path.dirname(tmp_filename), exist_ok=True)
imsave(tmp_filename, first_target)
tmp_filename = os.path.join(args.save_root, "%s-%02d-%s.png" % (gt_filename[:-4], num_interp+1, "ground_truth"))
imsave(tmp_filename, second_target)
if (i + 1) >= num_batches:
break
if args.write_video:
_pipe.stdin.close()
_pipe.wait()
"""
Print final accuracy statistics. If intermediate ground truth frames are not available from the input sequence,
the first low FPS frame is treated as a ground-truth frame for all intermediately predicted frames,
as the quantities should not be trusted, in this case.
"""
for i in range(num_interp):
result2print = 'interm {:02d} PSNR: {:.2f}, SSIM: {:.3f}, IE: {:.2f}'.format(i+1,
np.nanmean(avg_metrics[i::num_interp], axis=0)[0],
np.nanmean(avg_metrics[i::num_interp], axis=0)[1],
np.nanmean(avg_metrics[i::num_interp], axis=0)[2])
block.log(result2print)
avg_metrics = np.nanmean(avg_metrics, axis=0)
result2print = 'Overall PSNR: {:.2f}, SSIM: {:.3f}, IE: {:.2f}'.format(avg_metrics[0], avg_metrics[1],
avg_metrics[2])
v_psnr, v_ssim, v_ie = avg_metrics[0], avg_metrics[1], avg_metrics[2]
block.log(result2print)
# re-name video with psnr
if args.rank == 0 and args.write_video:
shutil.move(os.path.join(args.save_root, '__epoch_%03d.mp4' % epoch),
os.path.join(args.save_root, '__epoch_%03d_psnr_%1.2f.mp4' % (epoch, avg_metrics[0])))
# Move back the model to train mode.
model.train()
torch.cuda.empty_cache()
block.log('max memory allocated (GB): {:.3f}: '.format(
torch.cuda.max_memory_allocated() / (1024 * 1024 * 1024)))
return v_psnr, v_ssim, v_ie, loss_values.val
if __name__ == '__main__':
main()
| unsupervised-video-interpolation-master | eval.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from __future__ import division
from __future__ import print_function
import os
import natsort
import numpy as np
from imageio import imread
import torch
from torch.utils import data
class VideoInterp(data.Dataset):
def __init__(self, args=None, root='', num_interp=7, sample_rate=1, step_size=1,
is_training=False, transform=None):
self.num_interp = num_interp
self.sample_rate = sample_rate
self.step_size = step_size
        self.transform = transform
        self.is_training = is_training
self.start_index = args.start_index
self.stride = args.stride
self.crop_size = args.crop_size
# argument sanity check
assert (os.path.exists(root)), "Invalid path to input dataset."
assert self.num_interp > 0, "num_interp must be at least 1"
assert self.step_size > 0, "step_size must be at least 1"
if self.is_training:
self.start_index = 0
        # collect image file lists
self.ref = self.collect_filelist(root)
# calculate total number of unique sub-sequences
def calc_subseq_len(n):
return (n - max(1, (self.num_interp + 1) * self.sample_rate) - 1) // self.step_size + 1
self.counts = [calc_subseq_len(len(el)) for el in self.ref]
self.total = np.sum(self.counts)
self.cum_sum = list(np.cumsum([0] + [el for el in self.counts]))
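        # Worked example (illustrative numbers): with num_interp=7, sample_rate=1 and step_size=8,
        # a clip of 100 frames yields (100 - 8 - 1) // 8 + 1 = 12 unique sub-sequences.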
def collect_filelist(self, root):
include_ext = [".png", ".jpg", "jpeg", ".bmp"]
# collect subfolders, excluding hidden files, but following symlinks
dirs = [x[0] for x in os.walk(root, followlinks=True) if not x[0].startswith('.')]
# naturally sort, both dirs and individual images, while skipping hidden files
dirs = natsort.natsorted(dirs)
datasets = [
[os.path.join(fdir, el) for el in natsort.natsorted(os.listdir(fdir))
if os.path.isfile(os.path.join(fdir, el))
and not el.startswith('.')
and any([el.endswith(ext) for ext in include_ext])]
for fdir in dirs
]
return [el for el in datasets if el]
def get_sample_indices(self, index, tar_index=None):
if self.is_training:
sample_indices = [index, index + self.sample_rate * tar_index, index +
self.sample_rate * (self.num_interp + 1)]
else:
sample_indices = [index + i * self.sample_rate for i in range(0, self.num_interp + 2)]
if self.sample_rate == 0:
sample_indices[-1] += 1
return sample_indices
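        # Worked example (illustrative): with num_interp=7 and sample_rate=1, a training call with
        # index=0 and tar_index=3 returns [0, 3, 8] (first frame, sampled target, last frame),
        # while a validation call returns the full window [0, 1, ..., 8].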
def pad_images(self, images):
height, width, _ = images[0].shape
image_count = len(images)
# Pad images with zeros if it is not evenly divisible by args.stride (property of model)
if (height % self.stride) != 0:
new_height = (height // self.stride + 1) * self.stride
for i in range(image_count):
images[i] = np.pad(images[i], ((0, new_height - height), (0, 0), (0, 0)), 'constant',
constant_values=(0, 0))
if (width % self.stride) != 0:
new_width = (width // self.stride + 1) * self.stride
for i in range(image_count):
images[i] = np.pad(images[i], ((0, 0), (0, new_width - width), (0, 0)), 'constant',
constant_values=(0, 0))
return images
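        # Worked example (illustrative): with stride=64, a 720x1280 frame is zero-padded to
        # 768x1280 so that both spatial dimensions divide evenly by the model stride.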
def __len__(self):
return self.total
def __getitem__(self, index):
# Adjust index
index = len(self) + index if index < 0 else index
index = index + self.start_index
dataset_index = np.searchsorted(self.cum_sum, index + 1)
index = self.step_size * (index - self.cum_sum[np.maximum(0, dataset_index - 1)])
image_list = self.ref[dataset_index - 1]
# target index, subset of range(1,num_interp+1)
tar_index = 1 + torch.randint(0, max(1, self.num_interp), (1,)).item()
input_indices = self.get_sample_indices(index, tar_index)
# reverse subsequence for augmentation with a probability of 0.5
if self.is_training and torch.randint(0, 2, (1,)).item():
input_indices = input_indices[::-1]
tar_index = self.num_interp - tar_index + 1
image_files = [image_list[i] for i in input_indices]
# Read images from file
images = [imread(image_file)[:, :, :3] for image_file in image_files]
image_shape = images[0].shape
# Apply data augmentation if defined.
if self.transform:
input_images, target_images = [images[0], images[-1]], images[1:-1]
input_images, target_images = self.transform(input_images, target_images)
images = [input_images[0]] + target_images + [input_images[-1]]
# Pad images with zeros, so they fit evenly to model arch in forward pass.
padded_images = self.pad_images(images)
input_images = [torch.from_numpy(np.ascontiguousarray(tmp.transpose(2, 0, 1).astype(np.float32))).float() for
tmp in padded_images]
output_dict = {
'image': input_images, 'tindex': tar_index, 'ishape': image_shape[:2], 'input_files': image_files
}
# print (' '.join([os.path.basename(f) for f in image_files]))
return output_dict
class CycleVideoInterp(VideoInterp):
def __init__(self, args=None, root='', num_interp=7, sample_rate=1, step_size=1,
is_training=False, transform=None):
super(CycleVideoInterp, self).__init__(args=args, root=root, num_interp=num_interp, sample_rate=sample_rate,
step_size=step_size, is_training=is_training, transform=transform)
        # Adjust indices
if self.is_training:
self.counts = [el - 1 for el in self.counts]
self.total = np.sum(self.counts)
self.cum_sum = list(np.cumsum([0] + [el for el in self.counts]))
def get_sample_indices(self, index, tar_index=None):
if self.is_training:
offset = max(1, self.sample_rate) + self.sample_rate * self.num_interp
sample_indices = [index, index + offset, index + 2 * offset]
else:
sample_indices = [index + i * self.sample_rate for i in range(0, self.num_interp + 2)]
if self.sample_rate == 0:
sample_indices[-1] += 1
return sample_indices
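        # Worked example (illustrative): with num_interp=7 and sample_rate=1, offset = 8, so a
        # training call with index=0 returns [0, 8, 16]: three consecutive low-FPS frames, used by
        # the cycle-consistency objective without requiring intermediate ground truth.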
| unsupervised-video-interpolation-master | datasets/VideoInterp.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from .VideoInterp import *
| unsupervised-video-interpolation-master | datasets/__init__.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from __future__ import division
import random
from PIL import Image
import numpy as np
from torchvision.transforms import functional as transf
"""
Compose for Multiple Arguments
"""
class Compose(object):
"""Custom class to serialise transformations that
accept multiple input arguments
Args:
transforms (list of ``Transform`` objects): list of custom transforms to compose
Example:
composed_transf = data_transforms.Compose(
[NumpyToPILImage(),
RandomScaledCrop2D(crop_height=384, crop_width=384, min_crop_ratio=0.8),
PILImageToNumpy(),
RandomReverseSequence(),
RandomBrightness(brightness_factor=0.1)
])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, inputs, targets):
for transform in self.transforms:
inputs, targets = transform(inputs, targets)
return inputs, targets
"""
Image Type Conversion
"""
class NumpyToPILImage(object):
"""Convert numpy array to an instance of PIL Image, so we can use
geometric transformations already available in torchvision.transforms.functional.*.
"""
def __call__(self, inputs, targets):
inputs = [Image.fromarray(np.clip(im, 0, 255)) for im in inputs]
targets = [Image.fromarray(np.clip(im, 0, 255)) for im in targets]
return inputs, targets
class PILImageToNumpy(object):
"""Convert PIL Image to a numpy array at the end of geometric transformations.
Note. All photometric transformations currently work on numpy arrays, because for some
transformations, there is an implementation mis-match between torchvision and the ones defined
in flownet2 (Caffe: https://github.com/lmb-freiburg/flownet2), which they are derived/inspired from.
"""
def __call__(self, inputs, targets):
inputs = [np.array(im) for im in inputs]
targets = [np.array(im) for im in targets]
return inputs, targets
"""
Geometric Augmentation
"""
class RandomRotate2D(object):
"""Apply random 2D in-plane rotation of on input and target image sequences.
For video interpolation or optical flow studies, we also add a small
offset rotation to each image in the sequence ranging from [-delta, delta] degrees
in a linear fashion, such that networks can learn to recover the added fake rotation.
"""
def __init__(self, base_angle=20, delta_angle=0, resample=Image.BILINEAR):
self.base_angle = base_angle
self.delta_angle = delta_angle
self.resample = resample
def __call__(self, inputs, targets):
base = random.uniform(-self.base_angle, self.base_angle)
delta = random.uniform(-self.delta_angle, self.delta_angle)
resample = self.resample
inputs[0] = transf.rotate(inputs[0], angle=(base - delta / 2.), resample=resample)
        inputs[-1] = transf.rotate(inputs[-1], angle=(base + delta / 2.), resample=resample)
# Apply linearly varying offset to targets
# calculate offset ~ (-delta/2., delta/2.)
tlinspace = np.linspace(-1, 1, len(targets) + 2)
for i, image in enumerate(targets):
offset = tlinspace[i + 1] * delta / 2.
targets[i] = transf.rotate(image, angle=(base + offset), resample=resample)
return inputs, targets
class RandomTranslate2D(object):
"""Apply random 2D translation on input and target image sequences.
For video interpolation or optical flow studies, we also add a small
offset translation to each image in the sequence ranging from [-delta, delta] pixel displacements
in a linear fashion, such that networks can learn to recover the added fake translation.
"""
def __init__(self, max_displ_factor=0.05, resample=Image.NEAREST):
self.max_displ_factor = max_displ_factor
self.resample = resample
def __call__(self, inputs, targets):
# h, w, _ = inputs[0].shape
w, h = inputs[0].size
max_displ_factor = self.max_displ_factor
resample = self.resample
# Sample a displacement in [-max_displ, max_displ] for both height and width
max_width_displ = int(w * max_displ_factor)
wd = random.randint(-max_width_displ, max_width_displ)
max_height_displ = int(h * max_displ_factor)
hd = random.randint(-max_height_displ, max_height_displ)
inputs[0] = transf.affine(inputs[0], angle=0, translate=(wd, hd), scale=1, shear=0, resample=resample)
inputs[-1] = transf.affine(inputs[-1], angle=0, translate=(-wd, -hd), scale=1, shear=0, resample=resample)
# Apply linearly varying offset to targets
# calculate offset ~ (-{w|h}_delta, {w|h}_delta})
tlinspace = -1 * np.linspace(-1, 1, len(targets) + 2)
for i, image in enumerate(targets):
wo, ho = tlinspace[i + 1] * wd, tlinspace[i + 1] * hd
targets[i] = transf.affine(image, angle=0, translate=(wo, ho), scale=1, shear=0, resample=resample)
return inputs, targets
class RandomCrop2D(object):
"""A simple random 3D crop with a provided crop_size.
"""
def __init__(self, crop_height, crop_width):
self.crop_height = crop_height
self.crop_width = crop_width
def __call__(self, inputs, targets):
width, height = inputs[0].size
crop_width, crop_height = self.crop_width, self.crop_height
# sample crop indices
left = random.randint(0, width - crop_width)
top = random.randint(0, height - crop_height)
for i, image in enumerate(inputs):
inputs[i] = transf.crop(image, top, left, crop_height, crop_width)
for i, image in enumerate(targets):
targets[i] = transf.crop(image, top, left, crop_height, crop_width)
return inputs, targets
class RandomScaledCrop2D(object):
"""Apply random 2D crop followed by a scale operation.
Note to simulate a simple crop, set
``min_crop_ratio=min(crop_height,crop_width)/min(height, width)``.
We basically, first, crop the original image with a size larger or smaller than
the desired crop size. We then scale the images to the desired crop_size.
So, in a way, this transformation encapsulates two augmentations: scale + crop.
"""
def __init__(self, crop_height, crop_width, min_crop_ratio=0.6, resample=Image.BILINEAR):
# Aspect ratio inherited from (crop_height, crop_width)
self.crop_aspect = crop_height / crop_width
self.crop_shape = (crop_height, crop_width)
self.min_crop_ratio = min_crop_ratio
self.resample = resample
def __call__(self, inputs, targets):
# height, width, _ = inputs[0].shape
width, height = inputs[0].size
crop_aspect = self.crop_aspect
crop_shape = self.crop_shape
resample = self.resample
min_crop_ratio = self.min_crop_ratio
source_aspect = height / width
# sample a crop factor in [min_crop_ratio, 1.)
crop_ratio = random.uniform(min_crop_ratio, 1.0)
# Preserve aspect ratio provided by (crop_height, crop_width)
        # Calculate crop height and width; apply crop_ratio along the min(height, width) axis
if crop_aspect < source_aspect:
cwidth = int(width * crop_ratio)
cheight = int(cwidth * crop_aspect)
else:
cheight = int(height * crop_ratio)
cwidth = int(cheight / crop_aspect)
        # Avoid bilinear re-sampling when crop_size == full_size
if cheight == cwidth and cwidth == width:
return inputs, targets
# sample crop indices
left = random.randint(0, width - cwidth)
top = random.randint(0, height - cheight)
for i, image in enumerate(inputs):
inputs[i] = transf.resized_crop(inputs[i], top, left, cheight, cwidth, crop_shape, interpolation=resample)
for i, image in enumerate(targets):
targets[i] = transf.resized_crop(targets[i], top, left, cheight, cwidth, crop_shape, interpolation=resample)
return inputs, targets
class RandomHorizontalFlip(object):
"""Apply a random horizontal flip."""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, inputs, targets):
        # With probability self.prob, return the sequence unchanged (no flip applied).
if random.random() < self.prob:
return inputs, targets
# Apply a horizontal flip
for i, image in enumerate(inputs):
inputs[i] = transf.hflip(image)
for i, image in enumerate(targets):
targets[i] = transf.hflip(image)
return inputs, targets
class RandomVerticalFlip(object):
"""Apply a random vertical flip."""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, inputs, targets):
        # With probability self.prob, return the sequence unchanged (no flip applied).
if random.random() < self.prob:
return inputs, targets
# Apply a vertical flip
for i, image in enumerate(inputs):
inputs[i] = transf.vflip(image)
for i, image in enumerate(targets):
targets[i] = transf.vflip(image)
return inputs, targets
class RandomReverseSequence(object):
"""Randomly reverse the order of inputs, and targets"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, inputs, targets):
if random.random() < self.prob:
return inputs, targets
# Reverse sequence
inputs = inputs[::-1]
targets = targets[::-1]
return inputs, targets
"""
Photometric Augmentation
"""
class RandomGamma(object):
"""Apply a gamma transformation, with gamma factor of (gamma_low, anf gamma_high)"""
def __init__(self, gamma_low, gamma_high):
self.gamma_low = gamma_low
self.gamma_high = gamma_high
def __call__(self, inputs, targets):
gamma = random.uniform(self.gamma_low, self.gamma_high)
if gamma == 1.0:
return inputs, targets
gamma_inv = 1. / gamma
# Apply a gamma
for i, image in enumerate(inputs):
image = np.power(image / 255.0, gamma_inv) * 255.0
inputs[i] = np.clip(image, 0., 255.)
for i, image in enumerate(targets):
image = np.power(image / 255.0, gamma_inv) * 255.0
targets[i] = np.clip(image, 0., 255.)
return inputs, targets
class RandomBrightness(object):
"""Apply a random brightness to each channel in the image.
An implementation that is quite distinct from torchvision.
"""
def __init__(self, brightness_factor=0.1):
self.brightness_factor = brightness_factor
def __call__(self, inputs, targets):
        brightness_factor = [1 + random.uniform(-self.brightness_factor, self.brightness_factor) for _ in range(3)]
        brightness_factor = np.array(brightness_factor)
# Apply a brightness
for i, image in enumerate(inputs):
            image = image * brightness_factor
inputs[i] = np.clip(image, 0., 255.)
for i, image in enumerate(targets):
image = image * brighness_factor
targets[i] = np.clip(image, 0., 255.)
return inputs, targets
class RandomColorOrder(object):
"""Randomly re-order the channels of images.
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, inputs, targets):
if random.random() < self.prob:
return inputs, targets
new_order = np.random.permutation(3)
        # Apply the channel re-ordering
for i, image in enumerate(inputs):
inputs[i] = image[..., new_order]
for i, image in enumerate(targets):
targets[i] = image[..., new_order]
return inputs, targets
class RandomContrast(object):
"""Apply a random contrast in the range (contrast_low, contrast_high) to all channels.
An implementation that is quite distinct from torchvision.
"""
def __init__(self, contrast_low, contrast_high):
self.contrast_low = contrast_low
self.contrast_high = contrast_high
def __call__(self, inputs, targets):
contrast = 1 + random.uniform(self.contrast_low, self.contrast_high)
# Apply a contrast
for i, image in enumerate(inputs):
gray_img = image[..., 0] * 0.299 + image[..., 1] * 0.587 + image[..., 2] * 0.114
tmp_img = np.ones_like(image) * gray_img.mean()
image = image * contrast + (1 - contrast) * tmp_img
inputs[i] = np.clip(image, 0, 255)
for i, image in enumerate(targets):
gray_img = image[..., 0] * 0.299 + image[..., 1] * 0.587 + image[..., 2] * 0.114
tmp_img = np.ones_like(image) * gray_img.mean()
image = image * contrast + (1 - contrast) * tmp_img
targets[i] = np.clip(image, 0, 255)
return inputs, targets
class RandomSaturation(object):
"""Apply a random saturation in the range (saturation_low, saturation_high) to all channels.
An implementation that is quite distinct from torchvision.
"""
def __init__(self, saturation_low, saturation_high):
self.saturation_low = saturation_low
self.saturation_high = saturation_high
def __call__(self, inputs, targets):
saturation = 1 + random.uniform(self.saturation_low, self.saturation_high)
if saturation == 1.0:
return inputs, targets
# Apply a saturation
for i, image in enumerate(inputs):
            gray_img = image[..., 0] * 0.299 + image[..., 1] * 0.587 + image[..., 2] * 0.114
tmp_img = np.stack((gray_img, gray_img, gray_img), axis=2)
image = image * saturation + (1 - saturation) * tmp_img
inputs[i] = np.clip(image, 0, 255)
for i, image in enumerate(targets):
            gray_img = image[..., 0] * 0.299 + image[..., 1] * 0.587 + image[..., 2] * 0.114
tmp_img = np.stack((gray_img, gray_img, gray_img), axis=2)
image = image * saturation + (1 - saturation) * tmp_img
targets[i] = np.clip(image, 0, 255)
return inputs, targets
| unsupervised-video-interpolation-master | datasets/data_transforms.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# The baseline Super SloMo relies on torch.nn.functional.grid_sample to implement a warping module.
# To ensure that our results replicate published accuracy numbers, we also implement a Resample2D layer
# in a similar way, completely with torch tensors, as is done in:
# https://github.com/avinashpaliwal/Super-SloMo/blob/master/model.py#L213
#
# However, for faster training, we suggest to use our CUDA kernels for Resample2D, here:
# https://github.com/NVIDIA/flownet2-pytorch/blob/master/networks/resample2d_package/resample2d.py
#
# from flownet2_pytorch.networks.resample2d_package.resample2d import Resample2d
#
class MyResample2D(nn.Module):
def __init__(self, width, height):
super(MyResample2D, self).__init__()
self.width = width
self.height = height
# make grids for horizontal and vertical displacements
grid_w, grid_h = np.meshgrid(np.arange(width), np.arange(height))
grid_w, grid_h = grid_w.reshape((1,) + grid_w.shape), grid_h.reshape((1,) + grid_h.shape)
self.register_buffer("grid_w", torch.tensor(grid_w, requires_grad=False, dtype=torch.float32))
self.register_buffer("grid_h", torch.tensor(grid_h, requires_grad=False, dtype=torch.float32))
def forward(self, im, uv):
# Get relative displacement
u = uv[:, 0, ...]
v = uv[:, 1, ...]
# Calculate absolute displacement along height and width axis -> (batch_size, height, width)
ww = self.grid_w.expand_as(u) + u
hh = self.grid_h.expand_as(v) + v
# Normalize indices to [-1,1]
ww = 2 * ww / (self.width - 1) - 1
hh = 2 * hh / (self.height - 1) - 1
# Form a grid of shape (batch_size, height, width, 2)
norm_grid_wh = torch.stack((ww, hh), dim=-1)
        # Perform the resampling (align_corners=True matches the (size - 1) normalization above)
        resampled_im = torch.nn.functional.grid_sample(im, norm_grid_wh, align_corners=True)
        return resampled_im
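# A minimal usage sketch (hypothetical shapes, assuming a CUDA device):
#   resampler = MyResample2D(width=640, height=360).cuda()
#   # images: (B, 3, 360, 640), flow: (B, 2, 360, 640) with (u, v) displacements in pixels
#   warped = resampler(images, flow)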
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
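    # A stand-in teacher used when no pretrained checkpoint is given: it returns an empty loss
    # dict and simply echoes the middle input frame twice, matching the teacher's output signature.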
def forward(self, inputs, target_index):
return {}, inputs['image'][1], inputs['image'][1]
| unsupervised-video-interpolation-master | models/model_utils.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from __future__ import division
from __future__ import print_function
import torch
import torch.nn.functional as F
from .model_utils import MyResample2D, DummyModel
from .HJSuperSloMo import HJSuperSloMo
class CycleHJSuperSloMo(HJSuperSloMo):
def __init__(self, args, mean_pix=[109.93, 109.167, 101.455]):
super(CycleHJSuperSloMo, self).__init__(args=args, mean_pix=mean_pix)
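        # If a pretrained checkpoint is given, keep a frozen copy of it as a "teacher": its
        # interpolations of the input frame pairs serve as pseudo ground truth for the
        # distillation term ('teacher' loss) added in forward().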
if args.resume:
self.teacher = HJSuperSloMo(args)
checkpoint = torch.load(args.resume, map_location='cpu')
self.teacher.load_state_dict(checkpoint['state_dict'], strict=False)
for param in self.teacher.parameters():
param.requires_grad = False
self.teacher_weight = 0.8
if 'teacher_weight' in args and args.teacher_weight >= 0:
self.teacher_weight = args.teacher_weight
else:
self.teacher = DummyModel()
self.teacher_weight = 0.
def network_output(self, inputs, target_index):
im1, im2 = inputs
# Estimate bi-directional optical flows between input low FPS frame pairs
# Downsample images for robust intermediate flow estimation
ds_im1 = F.interpolate(im1, scale_factor=1./self.scale, mode='bilinear', align_corners=False)
ds_im2 = F.interpolate(im2, scale_factor=1./self.scale, mode='bilinear', align_corners=False)
uvf, bottleneck_out, uvb = self.make_flow_prediction(torch.cat((ds_im1, ds_im2), dim=1))
uvf = self.scale * F.interpolate(uvf, scale_factor=self.scale, mode='bilinear', align_corners=False)
uvb = self.scale * F.interpolate(uvb, scale_factor=self.scale, mode='bilinear', align_corners=False)
bottleneck_out = F.interpolate(bottleneck_out, scale_factor=self.scale, mode='bilinear', align_corners=False)
t = self.tlinespace[target_index]
t = t.reshape(t.shape[0], 1, 1, 1)
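        # Linearly approximate the intermediate flows at time t (Super SloMo formulation),
        # where uvf = F_{0->1} and uvb = F_{1->0}:
        #   F_{t->0} ~ -(1 - t) * t * F_{0->1} + t^2 * F_{1->0}
        #   F_{t->1} ~  (1 - t)^2 * F_{0->1} - t * (1 - t) * F_{1->0}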
uvb_t_raw = - (1 - t) * t * uvf + t * t * uvb
uvf_t_raw = (1 - t) * (1 - t) * uvf - (1 - t) * t * uvb
im1w_raw = self.resample2d(im1, uvb_t_raw) # im1w_raw
im2w_raw = self.resample2d(im2, uvf_t_raw) # im2w_raw
# Perform intermediate bi-directional flow refinement
uv_t_data = torch.cat((im1, im2, im1w_raw, uvb_t_raw, im2w_raw, uvf_t_raw), dim=1)
uvf_t, uvb_t, t_vis_map = self.make_flow_interpolation(uv_t_data, bottleneck_out)
uvb_t = uvb_t_raw + uvb_t # uvb_t
uvf_t = uvf_t_raw + uvf_t # uvf_t
im1w = self.resample2d(im1, uvb_t) # im1w
im2w = self.resample2d(im2, uvf_t) # im2w
# Compute final intermediate frame via weighted blending
alpha1 = (1 - t) * t_vis_map
alpha2 = t * (1 - t_vis_map)
denorm = alpha1 + alpha2 + 1e-10
im_t_out = (alpha1 * im1w + alpha2 * im2w) / denorm
return im_t_out, uvb, uvf
def network_eval(self, inputs, target_index):
_, _, height, width = inputs[0].shape
self.resample2d = MyResample2D(width, height).cuda()
# Normalize inputs
im1, im_target, im2 = [(im - self.mean_pix) for im in inputs]
im_t_out, uvb, uvf = self.network_output([im1, im2], target_index)
# Calculate losses
losses = {}
losses['pix_loss'] = self.L1_loss(im_t_out, im_target)
im_t_out_features = self.vgg16_features(im_t_out / 255.)
im_target_features = self.vgg16_features(im_target / 255.)
losses['vgg16_loss'] = self.L2_loss(im_t_out_features, im_target_features)
losses['warp_loss'] = self.L1_loss(self.resample2d(im1, uvb.contiguous()), im2) + \
self.L1_loss(self.resample2d(im2, uvf.contiguous()), im1)
smooth_bwd = self.L1_loss(uvb[:, :, :, :-1], uvb[:, :, :, 1:]) + \
self.L1_loss(uvb[:, :, :-1, :], uvb[:, :, 1:, :])
smooth_fwd = self.L1_loss(uvf[:, :, :, :-1], uvf[:, :, :, 1:]) + \
self.L1_loss(uvf[:, :, :-1, :], uvf[:, :, 1:, :])
losses['smooth_loss'] = smooth_bwd + smooth_fwd
# Coefficients for total loss determined empirically using a validation set
losses['tot'] = 0.8 * losses['pix_loss'] + 0.4 * losses['warp_loss'] + 0.005 * losses['vgg16_loss'] + losses[
'smooth_loss']
# Converts back to (0, 255) range
im_t_out = im_t_out + self.mean_pix
im_target = im_target + self.mean_pix
return losses, im_t_out, im_target
def forward(self, inputs, target_index):
if 'image' in inputs:
inputs = inputs['image']
if not self.training:
return self.network_eval(inputs, target_index)
self.resample2d = MyResample2D(inputs[0].shape[-1], inputs[0].shape[-2]).cuda()
# Input frames
im1, im2, im3 = inputs
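        # Cycle-consistency training: interpolate an intermediate frame from (im1, im2) and
        # another from (im2, im3), then interpolate between those two predictions; the result
        # should reconstruct the real middle frame im2, which supervises the model without
        # ground-truth intermediate frames.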
# Calculate Pseudo targets at interm_index
        with torch.no_grad():
            _, pseudo_gt12, _ = self.teacher({'image': [im1, im1, im2]}, target_index)
            _, pseudo_gt23, _ = self.teacher({'image': [im2, im3, im3]}, target_index)
        pseudo_gt12, pseudo_gt23 = pseudo_gt12 - self.mean_pix, pseudo_gt23 - self.mean_pix
im1, im2, im3 = im1 - self.mean_pix, im2 - self.mean_pix, im3 - self.mean_pix
pred12, pred12_uvb, pred12_uvf = self.network_output([im1, im2], target_index)
pred23, pred23_uvb, pred23_uvf = self.network_output([im2, im3], target_index)
target_index = (self.args.num_interp + 1) - target_index
ds_pred12 = F.interpolate(pred12, scale_factor=1./self.scale, mode='bilinear', align_corners=False)
ds_pred23 = F.interpolate(pred23, scale_factor=1./self.scale, mode='bilinear', align_corners=False)
uvf, bottleneck_out, uvb = self.make_flow_prediction(torch.cat((ds_pred12, ds_pred23), dim=1))
uvf = self.scale * F.interpolate(uvf, scale_factor=self.scale, mode='bilinear', align_corners=False)
uvb = self.scale * F.interpolate(uvb, scale_factor=self.scale, mode='bilinear', align_corners=False)
bottleneck_out = F.interpolate(bottleneck_out, scale_factor=self.scale, mode='bilinear', align_corners=False)
t = self.tlinespace[target_index]
t = t.reshape(t.shape[0], 1, 1, 1)
uvb_t_raw = - (1 - t) * t * uvf + t * t * uvb
uvf_t_raw = (1 - t) * (1 - t) * uvf - (1 - t) * t * uvb
im12w_raw = self.resample2d(pred12, uvb_t_raw) # im1w_raw
im23w_raw = self.resample2d(pred23, uvf_t_raw) # im2w_raw
# Perform intermediate bi-directional flow refinement
uv_t_data = torch.cat((pred12, pred23, im12w_raw, uvb_t_raw, im23w_raw, uvf_t_raw), dim=1)
uvf_t, uvb_t, t_vis_map = self.make_flow_interpolation(uv_t_data, bottleneck_out)
uvb_t = uvb_t_raw + uvb_t # uvb_t
uvf_t = uvf_t_raw + uvf_t # uvf_t
im12w = self.resample2d(pred12, uvb_t) # im1w
im23w = self.resample2d(pred23, uvf_t) # im2w
# Compute final intermediate frame via weighted blending
alpha1 = (1 - t) * t_vis_map
alpha2 = t * (1 - t_vis_map)
denorm = alpha1 + alpha2 + 1e-10
im_t_out = (alpha1 * im12w + alpha2 * im23w) / denorm
# Calculate training loss
losses = {}
losses['pix_loss'] = self.L1_loss(im_t_out, im2)
im_t_out_features = self.vgg16_features(im_t_out/255.)
im2_features = self.vgg16_features(im2/255.)
losses['vgg16_loss'] = self.L2_loss(im_t_out_features, im2_features)
losses['warp_loss'] = self.L1_loss(im12w_raw, im2) + self.L1_loss(im23w_raw, im2) + \
self.L1_loss(self.resample2d(pred12, uvb), pred23) + \
self.L1_loss(self.resample2d(pred23, uvf), pred12) + \
self.L1_loss(self.resample2d(im1, pred12_uvb), im2) + \
self.L1_loss(self.resample2d(im2, pred12_uvf), im1) + \
self.L1_loss(self.resample2d(im2, pred23_uvb), im3) + \
self.L1_loss(self.resample2d(im3, pred23_uvf), im2)
smooth_bwd = self.L1_loss(uvb[:, :, :, :-1], uvb[:, :, :, 1:]) + \
self.L1_loss(uvb[:, :, :-1, :], uvb[:, :, 1:, :]) + \
self.L1_loss(pred12_uvb[:, :, :, :-1], pred12_uvb[:, :, :, 1:]) + \
self.L1_loss(pred12_uvb[:, :, :-1, :], pred12_uvb[:, :, 1:, :]) + \
self.L1_loss(pred23_uvb[:, :, :, :-1], pred23_uvb[:, :, :, 1:]) + \
self.L1_loss(pred23_uvb[:, :, :-1, :], pred23_uvb[:, :, 1:, :])
smooth_fwd = self.L1_loss(uvf[:, :, :, :-1], uvf[:, :, :, 1:]) + \
self.L1_loss(uvf[:, :, :-1, :], uvf[:, :, 1:, :]) + \
self.L1_loss(pred12_uvf[:, :, :, :-1], pred12_uvf[:, :, :, 1:]) + \
self.L1_loss(pred12_uvf[:, :, :-1, :], pred12_uvf[:, :, 1:, :]) + \
self.L1_loss(pred23_uvf[:, :, :, :-1], pred23_uvf[:, :, :, 1:]) + \
self.L1_loss(pred23_uvf[:, :, :-1, :], pred23_uvf[:, :, 1:, :])
losses['loss_smooth'] = smooth_bwd + smooth_fwd
        losses['teacher'] = self.L1_loss(pseudo_gt12, pred12) + self.L1_loss(pseudo_gt23, pred23)
# Coefficients for total loss determined empirically using a validation set
losses['tot'] = self.pix_alpha * losses['pix_loss'] + self.warp_alpha * losses['warp_loss'] + \
self.vgg16_alpha * losses['vgg16_loss'] + self.smooth_alpha * losses['loss_smooth'] + self.teacher_weight * losses['teacher']
# Converts back to (0, 255) range
im_t_out = im_t_out + self.mean_pix
im_target = im2 + self.mean_pix
return losses, im_t_out, im_target
| unsupervised-video-interpolation-master | models/CycleHJSuperSloMo.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from .HJSuperSloMo import *
from .CycleHJSuperSloMo import *
| unsupervised-video-interpolation-master | models/__init__.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from .model_utils import MyResample2D
class HJSuperSloMo(nn.Module):
def __init__(self, args, mean_pix=[109.93, 109.167, 101.455], in_channel=6):
super(HJSuperSloMo, self).__init__()
self.is_output_flow = False
# --------------------- encoder --------------------
# conv1
self.flow_pred_encoder_layer1 = self.make_flow_pred_encoder_layer(in_channel, 32, 7, 3)
self.flow_pred_encoder_layer2 = self.make_flow_pred_encoder_layer(32, 64, 5, 2)
self.flow_pred_encoder_layer3 = self.make_flow_pred_encoder_layer(64, 128)
self.flow_pred_encoder_layer4 = self.make_flow_pred_encoder_layer(128, 256)
self.flow_pred_encoder_layer5 = self.make_flow_pred_encoder_layer(256, 512)
self.flow_pred_bottleneck = self.make_flow_pred_encoder_layer(512, 512)
self.flow_pred_decoder_layer5 = self.make_flow_pred_decoder_layer(512, 512)
self.flow_pred_decoder_layer4 = self.make_flow_pred_decoder_layer(1024, 256)
self.flow_pred_decoder_layer3 = self.make_flow_pred_decoder_layer(512, 128)
self.flow_pred_decoder_layer2 = self.make_flow_pred_decoder_layer(256, 64)
self.flow_pred_decoder_layer1 = self.make_flow_pred_decoder_layer(128, 32)
self.flow_pred_refine_layer = nn.Sequential(
nn.Conv2d(64, 32, 3, padding=1),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
self.forward_flow_conv = nn.Conv2d(32, 2, 1)
self.backward_flow_conv = nn.Conv2d(32, 2, 1)
# -------------- flow interpolation encoder-decoder --------------
self.flow_interp_encoder_layer1 = self.make_flow_interp_encoder_layer(16, 32, 7, 3)
self.flow_interp_encoder_layer2 = self.make_flow_interp_encoder_layer(32, 64, 5, 2)
self.flow_interp_encoder_layer3 = self.make_flow_interp_encoder_layer(64, 128)
self.flow_interp_encoder_layer4 = self.make_flow_interp_encoder_layer(128, 256)
self.flow_interp_encoder_layer5 = self.make_flow_interp_encoder_layer(256, 512)
self.flow_interp_bottleneck = self.make_flow_interp_encoder_layer(512, 512)
self.flow_interp_decoder_layer5 = self.make_flow_interp_decoder_layer(1024, 512)
self.flow_interp_decoder_layer4 = self.make_flow_interp_decoder_layer(1024, 256)
self.flow_interp_decoder_layer3 = self.make_flow_interp_decoder_layer(512, 128)
self.flow_interp_decoder_layer2 = self.make_flow_interp_decoder_layer(256, 64)
self.flow_interp_decoder_layer1 = self.make_flow_interp_decoder_layer(128, 32)
self.flow_interp_refine_layer = nn.Sequential(
nn.Conv2d(64, 32, 3, padding=1),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
self.flow_interp_forward_out_layer = nn.Conv2d(32, 2, 1)
self.flow_interp_backward_out_layer = nn.Conv2d(32, 2, 1)
# visibility
self.flow_interp_vis_layer = nn.Conv2d(32, 1, 1)
self.resample2d_train = MyResample2D(args.crop_size[1], args.crop_size[0])
mean_pix = torch.from_numpy(np.array(mean_pix)).float()
mean_pix = mean_pix.view(1, 3, 1, 1)
self.register_buffer('mean_pix', mean_pix)
self.args = args
self.scale = args.flow_scale
self.L1_loss = nn.L1Loss()
self.L2_loss = nn.MSELoss()
self.ignore_keys = ['vgg', 'grid_w', 'grid_h', 'tlinespace', 'resample2d_train', 'resample2d']
self.register_buffer('tlinespace', torch.linspace(0, 1, 2 + args.num_interp).float())
vgg16 = torchvision.models.vgg16(pretrained=True)
self.vgg16_features = nn.Sequential(*list(vgg16.children())[0][:22])
for param in self.vgg16_features.parameters():
param.requires_grad = False
# loss weights
self.pix_alpha = 0.8
self.warp_alpha = 0.4
self.vgg16_alpha = 0.005
self.smooth_alpha = 1.
def make_flow_pred_encoder_layer(self, in_chn, out_chn, kernel_size=3, padding=1):
layer = nn.Sequential(
nn.Conv2d(in_chn, out_chn, kernel_size, padding=padding),
nn.LeakyReLU(inplace=True, negative_slope=0.1),
nn.Conv2d(out_chn, out_chn, kernel_size, padding=padding),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
return layer
def make_flow_pred_decoder_layer(self, in_chn, out_chn):
layer = nn.Sequential(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
nn.Conv2d(in_chn, out_chn, 3, padding=1),
nn.LeakyReLU(inplace=True, negative_slope=0.1),
nn.Conv2d(out_chn, out_chn, 3, padding=1),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
return layer
def make_flow_interp_encoder_layer(self, in_chn, out_chn, kernel_size=3, padding=1):
layer = nn.Sequential(
nn.Conv2d(in_chn, out_chn, kernel_size, padding=padding),
nn.LeakyReLU(inplace=True, negative_slope=0.1),
nn.Conv2d(out_chn, out_chn, kernel_size, padding=padding),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
return layer
def make_flow_interp_decoder_layer(self, in_chn, out_chn):
layer = nn.Sequential(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
nn.Conv2d(in_chn, out_chn, 3, padding=1),
nn.LeakyReLU(inplace=True, negative_slope=0.1),
nn.Conv2d(out_chn, out_chn, 3, padding=1),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
return layer
def make_flow_interpolation(self, in_data, flow_pred_bottleneck_out):
flow_interp_encoder_out1 = self.flow_interp_encoder_layer1(in_data)
flow_interp_encoder_out1_pool = F.avg_pool2d(flow_interp_encoder_out1, 2, stride=2)
flow_interp_encoder_out2 = self.flow_interp_encoder_layer2(flow_interp_encoder_out1_pool)
flow_interp_encoder_out2_pool = F.avg_pool2d(flow_interp_encoder_out2, 2, stride=2)
flow_interp_encoder_out3 = self.flow_interp_encoder_layer3(flow_interp_encoder_out2_pool)
flow_interp_encoder_out3_pool = F.avg_pool2d(flow_interp_encoder_out3, 2, stride=2)
flow_interp_encoder_out4 = self.flow_interp_encoder_layer4(flow_interp_encoder_out3_pool)
flow_interp_encoder_out4_pool = F.avg_pool2d(flow_interp_encoder_out4, 2, stride=2)
flow_interp_encoder_out5 = self.flow_interp_encoder_layer5(flow_interp_encoder_out4_pool)
flow_interp_encoder_out5_pool = F.avg_pool2d(flow_interp_encoder_out5, 2, stride=2)
flow_interp_bottleneck_out = self.flow_interp_bottleneck(flow_interp_encoder_out5_pool)
flow_interp_bottleneck_out = torch.cat((flow_pred_bottleneck_out,
flow_interp_bottleneck_out), dim=1)
flow_interp_decoder_out5 = self.flow_interp_decoder_layer5(flow_interp_bottleneck_out)
flow_interp_decoder_out5 = torch.cat((flow_interp_encoder_out5, flow_interp_decoder_out5), dim=1)
flow_interp_decoder_out4 = self.flow_interp_decoder_layer4(flow_interp_decoder_out5)
flow_interp_decoder_out4 = torch.cat((flow_interp_encoder_out4, flow_interp_decoder_out4), dim=1)
flow_interp_decoder_out3 = self.flow_interp_decoder_layer3(flow_interp_decoder_out4)
flow_interp_decoder_out3 = torch.cat((flow_interp_encoder_out3, flow_interp_decoder_out3), dim=1)
flow_interp_decoder_out2 = self.flow_interp_decoder_layer2(flow_interp_decoder_out3)
flow_interp_decoder_out2 = torch.cat((flow_interp_encoder_out2, flow_interp_decoder_out2), dim=1)
flow_interp_decoder_out1 = self.flow_interp_decoder_layer1(flow_interp_decoder_out2)
flow_interp_decoder_out1 = torch.cat((flow_interp_encoder_out1, flow_interp_decoder_out1), dim=1)
flow_interp_motion_rep = self.flow_interp_refine_layer(flow_interp_decoder_out1)
flow_interp_forward_flow = self.flow_interp_forward_out_layer(flow_interp_motion_rep)
flow_interp_backward_flow = self.flow_interp_backward_out_layer(flow_interp_motion_rep)
flow_interp_vis_map = self.flow_interp_vis_layer(flow_interp_motion_rep)
flow_interp_vis_map = torch.sigmoid(flow_interp_vis_map)
return flow_interp_forward_flow, flow_interp_backward_flow, flow_interp_vis_map
def make_flow_prediction(self, x):
encoder_out1 = self.flow_pred_encoder_layer1(x)
encoder_out1_pool = F.avg_pool2d(encoder_out1, 2, stride=2)
encoder_out2 = self.flow_pred_encoder_layer2(encoder_out1_pool)
encoder_out2_pool = F.avg_pool2d(encoder_out2, 2, stride=2)
encoder_out3 = self.flow_pred_encoder_layer3(encoder_out2_pool)
encoder_out3_pool = F.avg_pool2d(encoder_out3, 2, stride=2)
encoder_out4 = self.flow_pred_encoder_layer4(encoder_out3_pool)
encoder_out4_pool = F.avg_pool2d(encoder_out4, 2, stride=2)
encoder_out5 = self.flow_pred_encoder_layer5(encoder_out4_pool)
encoder_out5_pool = F.avg_pool2d(encoder_out5, 2, stride=2)
bottleneck_out = self.flow_pred_bottleneck(encoder_out5_pool)
decoder_out5 = self.flow_pred_decoder_layer5(bottleneck_out)
decoder_out5 = torch.cat((encoder_out5, decoder_out5), dim=1)
decoder_out4 = self.flow_pred_decoder_layer4(decoder_out5)
decoder_out4 = torch.cat((encoder_out4, decoder_out4), dim=1)
decoder_out3 = self.flow_pred_decoder_layer3(decoder_out4)
decoder_out3 = torch.cat((encoder_out3, decoder_out3), dim=1)
decoder_out2 = self.flow_pred_decoder_layer2(decoder_out3)
decoder_out2 = torch.cat((encoder_out2, decoder_out2), dim=1)
decoder_out1 = self.flow_pred_decoder_layer1(decoder_out2)
decoder_out1 = torch.cat((encoder_out1, decoder_out1), dim=1)
motion_rep = self.flow_pred_refine_layer(decoder_out1)
uvf = self.forward_flow_conv(motion_rep)
uvb = self.backward_flow_conv(motion_rep)
return uvf, bottleneck_out, uvb
def forward(self, inputs, target_index):
if 'image' in inputs:
inputs = inputs['image']
if self.training:
self.resample2d = self.resample2d_train
else:
_, _, height, width = inputs[0].shape
self.resample2d = MyResample2D(width, height).cuda()
# Normalize inputs
im1, im_target, im2 = [(im - self.mean_pix) for im in inputs]
# Estimate bi-directional optical flows between input low FPS frame pairs
# Downsample images for robust intermediate flow estimation
ds_im1 = F.interpolate(im1, scale_factor=1./self.scale, mode='bilinear', align_corners=False)
ds_im2 = F.interpolate(im2, scale_factor=1./self.scale, mode='bilinear', align_corners=False)
uvf, bottleneck_out, uvb = self.make_flow_prediction(torch.cat((ds_im1, ds_im2), dim=1))
uvf = self.scale * F.interpolate(uvf, scale_factor=self.scale, mode='bilinear', align_corners=False)
uvb = self.scale * F.interpolate(uvb, scale_factor=self.scale, mode='bilinear', align_corners=False)
bottleneck_out = F.interpolate(bottleneck_out, scale_factor=self.scale, mode='bilinear', align_corners=False)
t = self.tlinespace[target_index]
t = t.reshape(t.shape[0], 1, 1, 1)
uvb_t_raw = - (1 - t) * t * uvf + t * t * uvb
uvf_t_raw = (1 - t) * (1 - t) * uvf - (1 - t) * t * uvb
im1w_raw = self.resample2d(im1, uvb_t_raw) # im1w_raw
im2w_raw = self.resample2d(im2, uvf_t_raw) # im2w_raw
# Perform intermediate bi-directional flow refinement
uv_t_data = torch.cat((im1, im2, im1w_raw, uvb_t_raw, im2w_raw, uvf_t_raw), dim=1)
uvf_t, uvb_t, t_vis_map = self.make_flow_interpolation(uv_t_data, bottleneck_out)
uvb_t = uvb_t_raw + uvb_t # uvb_t
uvf_t = uvf_t_raw + uvf_t # uvf_t
im1w = self.resample2d(im1, uvb_t) # im1w
im2w = self.resample2d(im2, uvf_t) # im2w
# Compute final intermediate frame via weighted blending
alpha1 = (1 - t) * t_vis_map
alpha2 = t * (1 - t_vis_map)
denorm = alpha1 + alpha2 + 1e-10
im_t_out = (alpha1 * im1w + alpha2 * im2w) / denorm
# Calculate training loss
losses = {}
losses['pix_loss'] = self.L1_loss(im_t_out, im_target)
im_t_out_features = self.vgg16_features(im_t_out/255.)
im_target_features = self.vgg16_features(im_target/255.)
losses['vgg16_loss'] = self.L2_loss(im_t_out_features, im_target_features)
losses['warp_loss'] = self.L1_loss(im1w_raw, im_target) + self.L1_loss(im2w_raw, im_target) + \
self.L1_loss(self.resample2d(im1, uvb.contiguous()), im2) + \
self.L1_loss(self.resample2d(im2, uvf.contiguous()), im1)
smooth_bwd = self.L1_loss(uvb[:, :, :, :-1], uvb[:, :, :, 1:]) + \
self.L1_loss(uvb[:, :, :-1, :], uvb[:, :, 1:, :])
smooth_fwd = self.L1_loss(uvf[:, :, :, :-1], uvf[:, :, :, 1:]) + \
self.L1_loss(uvf[:, :, :-1, :], uvf[:, :, 1:, :])
losses['smooth_loss'] = smooth_bwd + smooth_fwd
# Coefficients for total loss determined empirically using a validation set
losses['tot'] = self.pix_alpha * losses['pix_loss'] + self.warp_alpha * losses['warp_loss'] \
+ self.vgg16_alpha * losses['vgg16_loss'] + self.smooth_alpha * losses['smooth_loss']
# Converts back to (0, 255) range
im_t_out = im_t_out + self.mean_pix
im_target = im_target + self.mean_pix
return losses, im_t_out, im_target
| unsupervised-video-interpolation-master | models/HJSuperSloMo.py |
import torch
import unittest
def random_tensor(shape, dtype, device, mean=0, std=1):
return torch.empty(shape, dtype=dtype, device=device).normal_(mean, std)
class TestGemmDequantize(unittest.TestCase):
def setUp(self) -> None:
torch.classes.load_library("lib/libth_transformer.so")
torch.classes.load_library("lib/libgemm_dq_unit_ops.so")
self.unpack_packed_int4s = torch.ops.fastertransformer.unpack_int4_packed_tensor_to_int8
self.pack_int4s = torch.ops.fastertransformer.pack_int8_tensor_to_packed_int4
self.fused_gemm_dq = torch.ops.gemm_dq_unit_ops.fused_gemm_dq
self.fused_gemm_dq_bias_act = torch.ops.gemm_dq_unit_ops.fused_gemm_dq_bias_act
self.bench = torch.ops.gemm_dq_unit_ops.benchmark_against_cublas_fp
self.preprocess_weights_for_mixed_gemm = torch.ops.fastertransformer.preprocess_weights_for_mixed_gemm
self.symmetric_quantizer = torch.ops.fastertransformer._symmetric_quantize_last_axis_of_batched_matrix
torch.manual_seed(734876213)
def dequantize_test_helper(self, weight_type, quant_type):
assert quant_type == torch.int8 or quant_type == torch.quint4x2
lower_bound = -128 if quant_type == torch.int8 else -8
upper_bound = 127 if quant_type == torch.int8 else 7
m, n, k = 64, 128, 64
weights = torch.randint(lower_bound, upper_bound, [k, n], dtype=torch.int8, device="cpu")
packed_weight = self.pack_int4s(weights) if quant_type == torch.quint4x2 else weights
cuda_weights = self.preprocess_weights_for_mixed_gemm(packed_weight, quant_type).to("cuda")
weights = weights.to("cuda")
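        # Using an identity activation matrix makes the GEMM output equal the dequantized
        # weights (the scales are all ones), so this isolates the weight preprocessing /
        # dequantization path of the fused kernel.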
act = torch.eye(m, dtype=weight_type, device="cuda")
scales = torch.ones([n], dtype=weight_type, device='cuda')
actual = self.fused_gemm_dq(act, cuda_weights, scales)
torch.testing.assert_close(actual, weights, atol=0, rtol=0, check_dtype=False)
def test_fp16_int8_dequantize(self):
self.dequantize_test_helper(torch.float16, torch.int8)
def test_bf16_int8_dequantize(self):
self.dequantize_test_helper(torch.bfloat16, torch.int8)
def test_fp16_int4_dequantize(self):
self.dequantize_test_helper(torch.float16, torch.quint4x2)
def test_bf16_int4_dequantize(self):
self.dequantize_test_helper(torch.bfloat16, torch.quint4x2)
def apply_act(self, inp, act_str):
if act_str == "identity":
return inp
elif act_str == "silu":
return torch.nn.SiLU()(inp)
elif act_str == "relu":
return torch.nn.ReLU()(inp)
elif act_str == "gelu":
return torch.nn.GELU(approximate="tanh")(inp)
else:
assert False, "Unsupported activation"
def gemm_dequant_test_helper(self, compute_type, weight_dtype, gemm_ms, gemm_ns, gemm_ks, rtol, atol, act_str="only_gemm", benchmark=False):
assert weight_dtype == torch.int8 or weight_dtype == torch.quint4x2, "Weight must be quantized"
for gemm_k in gemm_ks:
for gemm_n in gemm_ns:
torch_weights_cpu = random_tensor((gemm_k, gemm_n), dtype=compute_type, device="cpu", mean=0, std=0.002)
ref_torch_weights, processed_torch_weights, torch_weight_scales = self.symmetric_quantizer(torch_weights_cpu, weight_dtype)
ref_torch_weights = self.unpack_packed_int4s(ref_torch_weights) if weight_dtype == torch.quint4x2 else ref_torch_weights
ref_torch_weights = ref_torch_weights.to("cuda")
processed_torch_weights = processed_torch_weights.to("cuda")
torch_weight_scales = torch_weight_scales.to("cuda")
torch_biases = random_tensor((gemm_n), dtype=compute_type, device="cuda", mean=0, std=0.1)
for num_rows in gemm_ms:
torch_activations = torch.randn(size=(num_rows, gemm_k), dtype=compute_type, device="cuda")
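                    # Reference path: scale the de-quantized integer weights per output channel and
                    # run a plain torch.matmul; the fused kernel must match within rtol/atol.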
scales_unsqueezed = torch_weight_scales.unsqueeze(0)
casted_weights = ref_torch_weights.to(torch_activations.dtype)
dequantized_weights = torch.multiply(casted_weights, scales_unsqueezed)
if benchmark:
assert act_str == "only_gemm", "Benchmarks against cublas must use just GEMM."
torch.cuda.profiler.start()
times, results = self.bench(torch_activations, processed_torch_weights, torch_weight_scales, dequantized_weights, 200)
torch.cuda.profiler.stop()
times = times[0]
cublas_time = times[0].item()
ft_time = times[1].item()
ft_speedup = cublas_time / ft_time
print("{},{},{},{},{},{}".format(num_rows, gemm_n, gemm_k, cublas_time, ft_time, ft_speedup))
reference_result = results[0]
ft_result = results[1]
else:
if act_str == "only_gemm":
reference_result = torch.matmul(torch_activations, dequantized_weights)
ft_result = self.fused_gemm_dq(torch_activations, processed_torch_weights, torch_weight_scales)
else:
reference_result = torch.matmul(torch_activations, dequantized_weights)
reference_result += torch_biases.unsqueeze(0)
reference_result = self.apply_act(reference_result, act_str)
ft_result = self.fused_gemm_dq_bias_act(torch_activations, processed_torch_weights, torch_weight_scales, torch_biases, act_str)
msg = "FC1 Failed on m={}, n={}, k={}".format(num_rows, gemm_n, gemm_k)
torch.testing.assert_close(ft_result, reference_result, rtol=rtol, atol=atol, msg=msg, check_dtype=False)
def test_fp16_int8_gemm(self):
self.gemm_dequant_test_helper(torch.float16, torch.int8,
gemm_ms = [256, 177, 195, 125, 66, 33, 8, 2, 1],
gemm_ns = [1024, 2048, 4096],
gemm_ks = [4096, 8192, 16384],
rtol=0.001, atol=0.002)
def test_fp16_int4_gemm(self):
self.gemm_dequant_test_helper(torch.float16, torch.quint4x2,
gemm_ms = [256, 177, 195, 125, 66, 33, 8, 2, 1],
gemm_ns = [1024, 2048, 4096],
gemm_ks = [4096, 8192, 16384],
rtol=0.001, atol=0.002)
def test_bf16_int8_gemm(self):
self.gemm_dequant_test_helper(torch.bfloat16, torch.int8,
gemm_ms = [256, 177, 195, 125, 66, 33, 8, 2, 1],
gemm_ns = [1024, 2048, 4096],
gemm_ks = [4096, 8192, 16384],
rtol=0.01, atol=0.01)
def test_bf16_int4_gemm(self):
self.gemm_dequant_test_helper(torch.bfloat16, torch.quint4x2,
gemm_ms = [256, 177, 195, 125, 66, 33, 8, 2, 1],
gemm_ns = [1024, 2048, 4096],
gemm_ks = [4096, 8192, 16384],
rtol=0.01, atol=0.01)
def test_fp16_int8_gemm_bias(self):
self.gemm_dequant_test_helper(torch.float16, torch.int8,
gemm_ms = [256],
gemm_ns = [1024],
gemm_ks = [8192],
rtol=0.001, atol=0.002,
act_str="identity")
def test_fp16_int8_gemm_bias_relu(self):
self.gemm_dequant_test_helper(torch.float16, torch.int8,
gemm_ms = [256],
gemm_ns = [1024],
gemm_ks = [8192],
rtol=0.001, atol=0.002,
act_str="relu")
def test_fp16_int8_gemm_bias_gelu(self):
self.gemm_dequant_test_helper(torch.float16, torch.int8,
gemm_ms = [256],
gemm_ns = [1024],
gemm_ks = [8192],
rtol=0.001, atol=0.002,
act_str="gelu")
def test_fp16_int8_gemm_bias_silu(self):
self.gemm_dequant_test_helper(torch.float16, torch.int8,
gemm_ms = [256],
gemm_ns = [1024],
gemm_ks = [8192],
rtol=0.001, atol=0.002,
act_str="silu")
def bench_helper(self, act_type, quant_type, rtol, atol):
        # Warm up; using bfloat16 here since it seems to reliably use cuBLAS.
x = random_tensor([20480, 20480], torch.bfloat16, device="cuda")
warm_iters = 30
for iter in range(warm_iters):
res = x @ x
m_shapes = torch.arange(0, 12)
m_shapes = 2 ** m_shapes
self.gemm_dequant_test_helper(act_type, quant_type,
gemm_ms = [128],
gemm_ns = [1536],
gemm_ks = [12288],
rtol=rtol, atol=atol, benchmark=True)
@unittest.skip("This is a benchmark so don't run by default")
def test_fp16_int8_cublas(self):
self.bench_helper(torch.float16, torch.int8, 1e-3, 0.002)
@unittest.skip("This is a benchmark so don't run by default")
def test_bf16_int8_cublas(self):
self.bench_helper(torch.bfloat16, torch.int8, 1e-2, 1e-2)
@unittest.skip("This is a benchmark so don't run by default")
def test_fp16_int4_cublas(self):
self.bench_helper(torch.float16, torch.quint4x2, 1e-3, 0.002)
@unittest.skip("This is a benchmark so don't run by default")
def test_bf16_int4_cublas(self):
self.bench_helper(torch.bfloat16, torch.quint4x2, 1e-2, 1e-2)
if __name__ == '__main__':
unittest.main() | FasterTransformer-main | tests/gemm_dequantize/th_gemm_dequantize.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
from pathlib import Path
import torch
@unittest.skipUnless(Path('lib/libth_transformer.so').exists(),
'lib/libth_transformer.so does not exist.')
class TestDecodeOp(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
torch.classes.load_library('lib/libth_transformer.so')
def setUp(self) -> None:
self.vocab_size = 1024
self.vocab_size_padded = 1024
self.tensor_para_size = 1
self.pipeline_para_size = 1
self.end_id = 0 # eos token id.
self.decode_op = None
self.decode_op = torch.classes.FasterTransformer.DynamicDecodeOp(
self.vocab_size,
self.vocab_size_padded,
self.tensor_para_size,
self.pipeline_para_size,
torch.float)
def tearDown(self):
del self.decode_op
def initialize_input_token_ids(self,
batch_size,
beam_width,
max_input_length,
use_random_input_tokens=True):
device = torch.cuda.current_device()
if use_random_input_tokens:
input_token_ids = torch.randint(
1, self.vocab_size, (batch_size, max_input_length),
dtype=torch.int, device=device)
else:
input_token_ids = torch.ones(
(batch_size, max_input_length), dtype=torch.int, device=device)
input_token_ids += self.end_id
input_token_ids.remainder_(self.vocab_size)
input_lengths = torch.randint(
1, max_input_length + 1, (batch_size,), dtype=torch.int, device=device)
input_lengths[torch.randint(0, batch_size, (1,))] = max_input_length
# mask by end_id.
step_indices = torch.arange(0, max_input_length, device=device)\
.unsqueeze(0).tile(batch_size, 1)
input_token_ids[input_lengths.unsqueeze(1) <= step_indices] = self.end_id
# Tiling.
input_token_ids = input_token_ids.repeat(1, beam_width)\
.view(batch_size * beam_width, -1)
input_lengths = input_lengths.view(-1, 1).repeat(1, beam_width).view(-1)
return input_token_ids, input_lengths
@staticmethod
def safe_to_vec(batch_size, value, dtype, is_cpu=False):
if value is None:
return None
device = torch.device('cpu') if is_cpu else torch.cuda.current_device()
return value * torch.ones(batch_size, dtype=dtype, device=device)
def run_decode(self,
batch_size=4,
beam_width=1,
max_input_length=4,
gen_length=3,
ite=0,
local_batch_size=None,
top_k=None,
top_p=None,
temperature=None,
repetition_penalty=None,
presence_penalty=None,
min_length=None,
len_penalty=None,
beam_search_diversity_rate=None,
random_seed=None,
top_p_decay=None,
top_p_min=None,
top_p_reset_ids=None,
logit_fn=None,
use_random_input_tokens=True):
ite = 0
local_batch_size = batch_size
max_seq_length = max_input_length + gen_length
device = torch.cuda.current_device()
eos_token_ids = self.safe_to_vec(batch_size, self.end_id, torch.int, False)
top_ks = self.safe_to_vec(batch_size, top_k, torch.int, True)
top_ps = self.safe_to_vec(batch_size, top_p, torch.float, True)
temperatures = self.safe_to_vec(batch_size, temperature, torch.float, True)
repetition_penalties = self.safe_to_vec(batch_size, repetition_penalty, torch.float, True)
presence_penalties = self.safe_to_vec(batch_size, presence_penalty, torch.float, True)
min_lengths = self.safe_to_vec(batch_size, min_length, torch.int, True)
len_penalties = self.safe_to_vec(batch_size, len_penalty, torch.float, True)
beam_search_diversity_rates = self.safe_to_vec(batch_size, beam_search_diversity_rate, torch.float, True)
random_seeds = self.safe_to_vec(batch_size, random_seed, torch.int64, True)
top_p_decays = self.safe_to_vec(batch_size, top_p_decay, torch.float, False)
top_p_mins = self.safe_to_vec(batch_size, top_p_min, torch.float, False)
top_p_reset_ids = self.safe_to_vec(batch_size, top_p_reset_ids, torch.int, False)
embedding_bias = None
sequence_limit_lengths = None # limit step
stop_words_list = None
bad_words_list = None
if beam_width > 1:
parent_ids = torch.zeros(
(max_seq_length, batch_size * beam_width),
dtype=torch.int32, device=device)
# src/tgt cache indirections.
cache_indirection = torch.zeros(
(2, batch_size, beam_width, max_seq_length),
dtype=torch.int32, device=device)
cum_log_probs = torch.zeros(batch_size * beam_width, device=device)
output_log_probs = None
else:
parent_ids = None
cache_indirection = None
src_cache_indirection = None
tgt_cache_indirection = None
cum_log_probs = None
output_log_probs = None
input_token_ids, input_lengths = self.initialize_input_token_ids(
batch_size, beam_width, max_input_length, use_random_input_tokens)
self.decode_op.setup(batch_size,
beam_width,
top_ks,
top_ps,
temperatures,
repetition_penalties,
presence_penalties,
min_lengths,
len_penalties,
beam_search_diversity_rates,
random_seeds,
top_p_decays,
top_p_mins,
top_p_reset_ids)
finished = torch.zeros_like(input_lengths).bool()
sequence_lengths = (max_input_length - 1) * torch.ones_like(input_lengths)
# Contiguous buffer for each decode_op step, will be transposed.
output_token_ids = torch.zeros(
(max_seq_length, batch_size * beam_width),
dtype=torch.int32, device=device)
output_token_ids[:max_input_length, ...] = input_token_ids.T
for step in range(max_input_length, max_seq_length):
if cache_indirection is not None:
bidx = range(ite * local_batch_size,
min((ite + 1) * local_batch_size, batch_size))
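                # Beam search ping-pongs between two cache-indirection buffers: the buffer written
                # at the previous step is read at this step, and the other one is written.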
src_indir_idx = (step - max_input_length) % 2
tgt_indir_idx = 1 - src_indir_idx
src_cache_indirection = cache_indirection[src_indir_idx, bidx, ...]
tgt_cache_indirection = cache_indirection[tgt_indir_idx, bidx, ...]
if logit_fn is None:
logits = torch.randn(
(batch_size, beam_width, self.vocab_size_padded), device=device)
else:
logits = logit_fn(batch_size, beam_width, device)
should_stop = self.decode_op.forward(
logits,
step,
max_input_length,
ite,
local_batch_size,
eos_token_ids,
top_ks,
top_ps,
temperatures,
repetition_penalties,
presence_penalties,
min_lengths,
len_penalties,
beam_search_diversity_rates,
top_p_decays,
top_p_mins,
top_p_reset_ids,
embedding_bias,
input_lengths,
sequence_limit_lengths,
stop_words_list,
bad_words_list,
src_cache_indirection,
output_token_ids.view(-1, batch_size, beam_width),
finished,
sequence_lengths,
cum_log_probs,
output_log_probs,
parent_ids,
tgt_cache_indirection)
if should_stop:
break
        # Output sequence length is sequence_lengths + 1 since sequence_lengths stores the index
        # of the last generated token rather than the token count.
        output_sequence_lengths = sequence_lengths + 1
return dict(
output_token_ids=output_token_ids.T,
output_sequence_lengths=output_sequence_lengths
)
def test_min_length_correctness_at_sampling(self):
methods = [dict(top_k=1, top_p=0.0), dict(top_k=0, top_p=0.8)]
testcases = [
dict(batch_size=4, max_input_length=4, min_length=2, gen_length=4),
dict(batch_size=4, max_input_length=4, min_length=32, gen_length=64),
# batch exceeds 1024
dict(batch_size=2048, max_input_length=6, min_length=4, gen_length=8),
]
def logit_fn(batch_size, beam_width, device):
logits = torch.randn(
(batch_size, beam_width, self.vocab_size_padded), device=device)
# Make the eos token be the most probable.
logits[..., self.end_id] = logits.max(dim=-1)[0] + 1
return logits
for tc, method in itertools.product(testcases, methods):
tc.update(method)
with self.subTest(tc):
output_dict = self.run_decode(beam_width=1, logit_fn=logit_fn, **tc)
output_seq_lengths = output_dict['output_sequence_lengths']
min_sequence_length = tc['max_input_length'] + tc['min_length']
self.assertTrue(
(output_seq_lengths >= min_sequence_length).all(),
f'failed indices {torch.where(output_seq_lengths < min_sequence_length)[0]}, '
f'values {output_seq_lengths[torch.where(output_seq_lengths < min_sequence_length)[0]]}')
def test_min_length_correctness_at_beamsearch(self):
testcases = [
# Online Beamsearch
dict(batch_size=4, beam_width=2, max_input_length=4, min_length=2, gen_length=4),
# Beamsearch
dict(batch_size=4, beam_width=16, max_input_length=4, min_length=2, gen_length=4),
# batch * beam exceeds 1024
dict(batch_size=1024, beam_width=2, max_input_length=4, min_length=4, gen_length=8),
dict(batch_size=128, beam_width=16, max_input_length=4, min_length=4, gen_length=8),
# large beam_width
dict(batch_size=4, beam_width=60, max_input_length=4, min_length=4, gen_length=8),
]
def logit_fn(batch_size, beam_width, device):
logits = torch.randn(
(batch_size, beam_width, self.vocab_size_padded), device=device)
# Make the eos token be the most probable.
logits[..., self.end_id] = logits.max(dim=-1)[0] + 1
return logits
for tc in testcases:
with self.subTest(tc):
output_dict = self.run_decode(logit_fn=logit_fn, **tc)
output_seq_lengths = output_dict['output_sequence_lengths']
min_sequence_length = tc['max_input_length'] + tc['min_length']
self.assertTrue(
(output_seq_lengths >= min_sequence_length).all(),
f'failed indices {torch.where(output_seq_lengths < min_sequence_length)[0]}, '
f'values {output_seq_lengths[torch.where(output_seq_lengths < min_sequence_length)[0]]}')
def test_repetition_penalty_correctness(self):
methods = [dict(top_k=1, top_p=0.0), dict(beam_width=2)]
testcases = [
dict(batch_size=4, max_input_length=4, repetition_penalty=2),
dict(batch_size=2048, max_input_length=4, repetition_penalty=2),
dict(batch_size=4, max_input_length=4, presence_penalty=0.5),
dict(batch_size=4, max_input_length=4, presence_penalty=1.0),
dict(batch_size=2048, max_input_length=4, presence_penalty=0.5),
]
def logit_fn(batch_size, beam_width, device):
logits = torch.zeros(
(batch_size, beam_width, self.vocab_size_padded), device=device)
# The token (vocab_size - 1) is the most probable unless penalized.
# After penalized, the expected output token ids will be
# [v-1, v-2, v-3, v-1, v-1, v-1, ...].
logits[..., self.vocab_size - 1] = 2
logits[..., self.vocab_size - 2] = 1.8
logits[..., self.vocab_size - 3] = 1.6
return logits
for tc, method in itertools.product(testcases, methods):
tc.update(method)
gen_length = 5
beam_width = tc.get('beam_width', 1)
            expected_token_ids = (self.vocab_size - 1) * torch.ones(
                (tc['batch_size'] * beam_width, gen_length), dtype=torch.int32)
            expected_token_ids[:, 1] = self.vocab_size - 2
            expected_token_ids[:, 2] = self.vocab_size - 3
with self.subTest(tc):
output_dict = self.run_decode(
gen_length=gen_length,
use_random_input_tokens=False, logit_fn=logit_fn, **tc)
output_token_ids = output_dict['output_token_ids'][:, tc['max_input_length']:]
                self.assertTrue(
                    (expected_token_ids.to(output_token_ids.device) == output_token_ids).all())
if __name__ == '__main__':
unittest.main()
| FasterTransformer-main | tests/unittests/th_op/test_th_decode_op.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import unittest
class TestQuantize(unittest.TestCase):
def setUp(self) -> None:
torch.classes.load_library("lib/libth_transformer.so")
self.add_bias_and_interleave_int4s = torch.ops.fastertransformer._add_bias_and_interleave_int4s
self.add_bias_and_interleave_int8s = torch.ops.fastertransformer._add_bias_and_interleave_int8s
self.unpack_packed_int4s = torch.ops.fastertransformer.unpack_int4_packed_tensor_to_int8
self.pack_int4s = torch.ops.fastertransformer.pack_int8_tensor_to_packed_int4
self.quant_interleave = torch.ops.fastertransformer._permute_B_rows_for_mixed_gemm
self.subbyte_transpose = torch.ops.fastertransformer._subbyte_transpose
def reference_interleave(self, tensor, quant_type):
assert quant_type == torch.int8 or quant_type == torch.quint4x2
tile_rows = 16 if quant_type == torch.int8 else 32
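        # Rows within each tile (16 rows for int8, 32 for int4) are permuted to mirror the
        # interleaved layout produced on the device side by _permute_B_rows_for_mixed_gemm.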
permutation_map = []
if tile_rows == 16:
permutation_map = [0,1,8,9,2,3,10,11,4,5,12,13,6,7,14,15]
elif tile_rows == 32:
permutation_map = [0,1,8,9,16,17,24,25,2,3,10,11,18,19,26,27,4,5,12,13,20,21,28,29,6,7,14,15,22,23,30,31]
else:
assert False, "Unsuppoered tile cols"
permutation_map = torch.tensor(permutation_map)
temp = tensor.reshape([-1, tile_rows, tensor.shape[-1]])
temp = temp[:, permutation_map, :]
return temp.reshape(tensor.shape)
def interleave_tester(self, quant_type, arch):
assert quant_type == torch.int8 or quant_type == torch.quint4x2
experts = [1, 4, 8]
m_shapes = [128, 256, 1024]
n_shapes = [128, 512, 1024, 4096]
lower_bound = -128 if quant_type == torch.int8 else -8
upper_bound = 127 if quant_type == torch.int8 else 7
ref_impl = self.reference_interleave
for expert in experts:
for m_shape in m_shapes:
for n_shape in n_shapes:
tensor = torch.randint(lower_bound, upper_bound, [expert, m_shape, n_shape], dtype=torch.int8)
ref_result = ref_impl(tensor.reshape([expert * m_shape, n_shape]), quant_type).reshape([expert, m_shape, n_shape])
if quant_type == torch.quint4x2:
tensor = self.pack_int4s(tensor)
act_result = self.quant_interleave(tensor, quant_type, arch)
if quant_type == torch.quint4x2:
act_result = self.unpack_packed_int4s(act_result)
torch.testing.assert_close(act_result, ref_result, rtol=0, atol=0)
def test_volta_int4_interleave(self):
tensor = torch.randint(-8, 7, [12, 128, 128], dtype=torch.int8)
self.assertRaises(RuntimeError, self.quant_interleave, tensor, torch.quint4x2, 70)
def test_volta_int8_interleave(self):
tensor = torch.randint(-128, 127, [12, 128, 128], dtype=torch.int8)
self.assertRaises(RuntimeError, self.quant_interleave, tensor, torch.int8, 70)
def test_turing_int4_interleave(self):
self.interleave_tester(torch.quint4x2, 75)
def test_turing_int8_interleave(self):
self.interleave_tester(torch.int8, 75)
def test_ampere_80_int4_interleave(self):
self.interleave_tester(torch.quint4x2, 80)
def test_ampere_80_int8_interleave(self):
self.interleave_tester(torch.int8, 80)
def test_ampere_86_int4_interleave(self):
self.interleave_tester(torch.quint4x2, 86)
def test_ampere_86_int8_interleave(self):
self.interleave_tester(torch.int8, 86)
def test_add_bias_interleave_int4(self):
        # Packed representation of the int4 values -8..7 (see test_unpack).
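        # The op is expected to add a bias of 8 to each int4 value (mapping [-8, 7] to [0, 15])
        # and interleave the values, analogous to the +128 bias applied in the int8 variant below;
        # the expected packed output was computed by hand.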
packed_int4s = torch.tensor([[-104, -70, -36, -2, 16, 50, 84, 118]], dtype=torch.int8)
actual_processed_int4s = self.add_bias_and_interleave_int4s(packed_int4s)
# Packed repr for preprocessed cuda input (computed by hand)
expected_processed_int4 = torch.tensor([[32, 100, 49, 117, -88, -20, -71, -3]], dtype=torch.int8)
torch.testing.assert_close(actual_processed_int4s, expected_processed_int4, rtol=0, atol=0)
def test_add_bias_interleave_int8(self):
int8s = torch.tensor([[-104, -70, -36, 127, 16, 50, 84, 118]], dtype=torch.int8)
actual_processed_int8s = self.add_bias_and_interleave_int8s(int8s)
# Packed repr for preprocessed cuda input (computed by hand)
tmp = torch.tensor([[-104, -36, -70, 127, 16, 84, 50, 118]], dtype=torch.int32) + 128
expected_processed_int8 = tmp.to(torch.int8)
torch.testing.assert_close(actual_processed_int8s, expected_processed_int8, rtol=0, atol=0)
def transpose_test_helper(self, quant_type):
assert quant_type == torch.int8 or quant_type == torch.quint4x2
experts = [1, 4, 8]
m_shapes = [128, 256, 1024]
n_shapes = [128, 4096]
lower_bound = -128 if quant_type == torch.int8 else -8
upper_bound = 127 if quant_type == torch.int8 else 7
for expert in experts:
for m_shape in m_shapes:
for n_shape in n_shapes:
tensor = torch.randint(lower_bound, upper_bound, [expert, m_shape, n_shape], dtype=torch.int8)
# We want to move the data, but not change the shape. The actual impl just changes to col major.
ref_result = tensor.permute([0, 2, 1]).reshape([expert, m_shape, n_shape])
if quant_type == torch.quint4x2:
tensor = self.pack_int4s(tensor)
act_result = self.subbyte_transpose(tensor, quant_type)
if quant_type == torch.quint4x2:
act_result = self.unpack_packed_int4s(act_result)
torch.testing.assert_close(act_result, ref_result, rtol=0, atol=0)
def test_transpose_int4(self):
self.transpose_test_helper(torch.quint4x2)
def test_transpose_int8(self):
self.transpose_test_helper(torch.int8)
def test_unpack(self):
packed_int4s = torch.tensor([[-104, -70, -36, -2, 16, 50, 84, 118]], dtype=torch.int8)
unpacked_int4s_as_int8 = self.unpack_packed_int4s(packed_int4s)
expected = torch.arange(-8, 8, dtype=torch.int8).reshape([1, 16])
torch.testing.assert_close(unpacked_int4s_as_int8, expected, rtol=0, atol=0)
def test_pack(self):
unpacked_i4s = torch.arange(-8, 8, dtype=torch.int8).reshape([1, 16])
packed_i4s = self.pack_int4s(unpacked_i4s)
expected = torch.tensor([[-104, -70, -36, -2, 16, 50, 84, 118]], dtype=torch.int8)
torch.testing.assert_close(packed_i4s, expected, rtol=0, atol=0)
def test_pack_unpack_identity(self):
initial_vals = torch.randint(-8, 7, [128, 128], dtype=torch.int8)
expected = self.unpack_packed_int4s(self.pack_int4s(initial_vals))
torch.testing.assert_close(initial_vals, expected, rtol=0, atol=0)
if __name__ == '__main__':
unittest.main()
| FasterTransformer-main | tests/weight_only_quant_ops/th_weight_quant_ops_unit_tests.py |