# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Images2D are used to represent images."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

import tensorflow as tf

from nvidia_tao_tf1.core.processors import ColorTransform, SpatialTransform


class Images2D(collections.namedtuple("Images2D", ["images", "canvas_shape"])):
    """
    Geometric primitive for representing images.

    The way this is used:
    1) Data sources create Images2D tuples with 3D tensors.
    2) Dataloader adds batch dimensions, converts the 3D tensors to 5D.
    3) Dataloader applies transformations by calling apply().

    images (tf.Tensor): A 3D tensor of shape [C, H, W], type tf.float32 and scaled to the
        range [0, 1]. The dimensions of the tensor are:
        C: Channel - color channel within a frame (e.g., 0: red, 1: green, 2: blue)
        H: Height - row index spanning from 0 to height - 1 of a frame.
        W: Width - column index spanning from 0 to width - 1 of a frame.
    canvas_shape (Canvas2D): Shape of the canvas on which images reside.
    """

    def apply(self, transformation, **kwargs):
        """Applies transformation.

        Note that at this point we expect batching to have been applied, so the dataset
        should have two batching dimensions (batch, temporal batch).

        Args:
            transformation (Transformation): The transformation to apply.
            output_image_dtype (tf.dtypes.DType): Output image dtype. Defaults to
                tf.float32.
        """
        data_format = "channels_first"
        spatial_transform = SpatialTransform(
            method="bilinear",
            background_value=0.5,
            data_format=data_format,
            verbose=False,
        )

        # Fold cast into the color transform op.
        output_dtype = kwargs.get("output_image_dtype") or tf.float32
        color_transform = ColorTransform(
            min_clip=0.0,
            max_clip=1.0,
            data_format=data_format,
            output_dtype=output_dtype,
        )

        images = self.images

        # Shape inference: combine the shape known at graph build time with the shape
        # known only at runtime.
        images_shape = images.shape.as_list()
        runtime_images_shape = tf.shape(input=images)
        for i, dim in enumerate(images_shape):
            if dim is None:
                images_shape[i] = runtime_images_shape[i]

        batch_size = images_shape[0]
        sequence_length = images_shape[1]
        num_channels = images_shape[2]
        height = images_shape[3]
        width = images_shape[4]

        stms = transformation.spatial_transform_matrix
        # Introduce sequence dimension.
        stms = tf.expand_dims(stms, axis=1)
        # Tile along the sequence dimension.
        stms = tf.tile(stms, (1, sequence_length, 1, 1))
        # Flatten batch and sequence dimensions.
        stms = tf.reshape(stms, [batch_size * sequence_length, 3, 3])

        ctms = transformation.color_transform_matrix
        # Introduce sequence dimension.
        ctms = tf.expand_dims(ctms, axis=1)
        # Tile along the sequence dimension.
        ctms = tf.tile(ctms, (1, sequence_length, 1, 1))
        # Flatten batch and sequence dimensions.
        ctms = tf.reshape(ctms, [batch_size * sequence_length, 4, 4])

        canvas_height = transformation.canvas_shape.height[0].shape.as_list()[-1]
        canvas_width = transformation.canvas_shape.width[0].shape.as_list()[-1]

        # Flatten batch and sequence dimensions.
        imgs = tf.reshape(
            images, [batch_size * sequence_length, num_channels, height, width]
        )
        imgs = spatial_transform(imgs, stms=stms, shape=(canvas_height, canvas_width))

        # Enable color augmentations only if the input has 3 channels.
        if num_channels == 3:
            imgs = color_transform(imgs, ctms=ctms)

        # Reshape back to separate batch and sequence dimensions.
        transformed_images = tf.reshape(
            imgs,
            [batch_size, sequence_length, num_channels, canvas_height, canvas_width],
        )

        return Images2D(
            images=transformed_images, canvas_shape=transformation.canvas_shape
        )


class LabelledImages2D(
    collections.namedtuple("LabelledImages2D", ["images", "labels", "shapes"])
):
    """
    Geometric primitive for representing images together with their labels.

    images (tf.Tensor): Image tensor, stored in the same channels-first [C, H, W]
        layout and [0, 1] value range as Images2D.
    labels: Labels associated with the images.
    shapes: Shapes of the images.
    """

    pass
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/images2d.py
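A minimal sketch (not part of the repository) of the matrix bookkeeping inside Images2D.apply(): one 3x3 spatial transform per example is tiled across the sequence dimension, then flattened so each (example, frame) pair gets its own matrix. Batch and sequence sizes are illustrative assumptions.

import tensorflow as tf

batch_size, sequence_length = 2, 4
# One 3x3 identity spatial transform matrix per example.
stms = tf.stack([tf.eye(3) for _ in range(batch_size)])        # [B, 3, 3]
stms = tf.expand_dims(stms, axis=1)                            # [B, 1, 3, 3]
stms = tf.tile(stms, (1, sequence_length, 1, 1))               # [B, T, 3, 3]
stms = tf.reshape(stms, [batch_size * sequence_length, 3, 3])  # [B*T, 3, 3]

with tf.compat.v1.Session() as sess:
    print(sess.run(tf.shape(stms)))  # -> [8 3 3]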
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Coordinates2D are used to represent geometric shapes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

from nvidia_tao_tf1.core import processors


class Coordinates2D(
    collections.namedtuple("Coordinates2D", ["coordinates", "canvas_shape"])
):
    """
    Geometric primitive for representing coordinates.

    coordinates (tf.SparseTensor): A 5D SparseTensor of shape [B, T, S, V, C] and type
        tf.float32.
        B = Batch - index of an example within a batch.
        T = Time - timestep/sequence index within an example - matches the index of a
            frame/image.
        S = Shape - shape index within a single frame/timestep. Shapes can be e.g.
            polygons, bounding boxes or polylines depending on the dataset/task.
        V = Vertex - index of a vertex (point) within a shape. E.g. a triangle has
            3 vertices.
        C = Coordinate - index of the coordinate within a vertex. 0: x coordinate,
            1: y coordinate.
    canvas_shape (Canvas2D): Shape of the canvas on which coordinates reside.

    Coordinates are encoded as sparse tensors using the following scheme:

        # Values contain (x, y) coordinates stored in a 1D tensor
        vertices = tf.constant([
            # 0th shape: triangle
            1, 1,    # top left vertex (x=1, y=1)
            3, 1,    # top right vertex
            2, 2,    # bottom middle vertex
            # 1st shape: line
            5, 5,    # left vertex
            10, 10,  # right vertex
        ])

        # Sparse indices are used to encode which (x, y) coordinates belong to which
        # shape, image, sequence and batch.
        indices = tf.constant([
            # 0th shape (triangle) coordinates
            [0, 0, 0, 0, 0],  # 0th example, 0th shape, 0th vertex, x coordinate
            [0, 0, 0, 0, 1],  # 0th example, 0th shape, 0th vertex, y coordinate
            [0, 0, 0, 1, 0],  # 0th example, 0th shape, 1st vertex, x coordinate
            [0, 0, 0, 1, 1],  # 0th example, 0th shape, 1st vertex, y coordinate
            [0, 0, 0, 2, 0],  # 0th example, 0th shape, 2nd vertex, x coordinate
            [0, 0, 0, 2, 1],  # 0th example, 0th shape, 2nd vertex, y coordinate
            # 1st shape (line) coordinates
            [0, 0, 1, 0, 0],  # 1st shape, 0th vertex, x coordinate
            [0, 0, 1, 0, 1],  # 1st shape, 0th vertex, y coordinate
            [0, 0, 1, 1, 0],  # 1st shape, 1st vertex, x coordinate
            [0, 0, 1, 1, 1],  # 1st shape, 1st vertex, y coordinate
        ], dtype=tf.int64)

        first_frame_shapes = tf.SparseTensor(
            indices=tf.reshape(indices, (-1, 5)),
            values=vertices,
            # dense_shape encodes:
            # 0th dim: number of examples within a batch (e.g. 32 if batch size is 32)
            # 1st dim: max number of timesteps within an example. For image data, this
            #     corresponds to the max number of frames.
            # 2nd dim: max number of shapes per frame.
            # 3rd dim: max number of vertices per shape.
            # 4th dim: number of coordinates per vertex (always 2 for 2D shapes.)
            dense_shape=tf.constant((1, 1, 2, 3, 2), dtype=tf.int64),
        )
    """

    def apply(self, transform, **kwargs):
        """
        Applies transformation to coordinates and canvas shape.

        Args:
            transform (Transform): Transform to apply.

        Returns:
            (Coordinates2D): Transformed coordinates.
        """
        polygon_transform = processors.PolygonTransform()
        transformed_coordinates = polygon_transform(
            self.coordinates, transform.spatial_transform_matrix
        )
        return Coordinates2D(
            coordinates=transformed_coordinates, canvas_shape=transform.canvas_shape
        )

    def replace_coordinates(self, new_coords, canvas_shape=None):
        """
        Create a new object with a new coordinates tensor.

        Note that we expect shape and counts to remain the same.

        Args:
            new_coords (SparseTensor): Replacement tensor for coordinates.
            canvas_shape (Tensor): (optional) canvas_shape.
        """
        return Coordinates2D(
            coordinates=new_coords, canvas_shape=canvas_shape or self.canvas_shape
        )


class Coordinates2DWithCounts(
    collections.namedtuple(
        "Coordinates2DWithCounts", ["coordinates", "canvas_shape", "vertices_count"]
    )
):
    """
    Geometric primitive for representing coordinates, with a vector for counts.

    coordinates (tf.SparseTensor): A 5D SparseTensor of shape [B, T, S, V, C] and type
        tf.float32.
        B = Batch - index of an example within a batch.
        T = Time - timestep/sequence index within an example - matches the index of a
            frame/image.
        S = Shape - shape index within a single frame/timestep. Shapes can be e.g.
            polygons, bounding boxes or polylines depending on the dataset/task.
        V = Vertex - index of a vertex (point) within a shape. E.g. a triangle has
            3 vertices.
        C = Coordinate - index of the coordinate within a vertex. 0: x coordinate,
            1: y coordinate.
    canvas_shape (Canvas2D): Shape of the canvas on which coordinates reside.
    vertices_count (tf.SparseTensor): number of vertices per polygon [B, T, S].

    Coordinates are encoded as sparse tensors using the same scheme as Coordinates2D:

        # Values contain (x, y) coordinates stored in a 1D tensor
        vertices = tf.constant([
            # 0th shape: triangle
            1, 1,    # top left vertex (x=1, y=1)
            3, 1,    # top right vertex
            2, 2,    # bottom middle vertex
            # 1st shape: line
            5, 5,    # left vertex
            10, 10,  # right vertex
        ])

        # Sparse indices are used to encode which (x, y) coordinates belong to which
        # shape, image, sequence and batch.
        indices = tf.constant([
            # 0th shape (triangle) coordinates
            [0, 0, 0, 0, 0],  # 0th example, 0th shape, 0th vertex, x coordinate
            [0, 0, 0, 0, 1],  # 0th example, 0th shape, 0th vertex, y coordinate
            [0, 0, 0, 1, 0],  # 0th example, 0th shape, 1st vertex, x coordinate
            [0, 0, 0, 1, 1],  # 0th example, 0th shape, 1st vertex, y coordinate
            [0, 0, 0, 2, 0],  # 0th example, 0th shape, 2nd vertex, x coordinate
            [0, 0, 0, 2, 1],  # 0th example, 0th shape, 2nd vertex, y coordinate
            # 1st shape (line) coordinates
            [0, 0, 1, 0, 0],  # 1st shape, 0th vertex, x coordinate
            [0, 0, 1, 0, 1],  # 1st shape, 0th vertex, y coordinate
            [0, 0, 1, 1, 0],  # 1st shape, 1st vertex, x coordinate
            [0, 0, 1, 1, 1],  # 1st shape, 1st vertex, y coordinate
        ], dtype=tf.int64)

        first_frame_shapes = tf.SparseTensor(
            indices=tf.reshape(indices, (-1, 5)),
            values=vertices,
            # dense_shape encodes:
            # 0th dim: number of examples within a batch (e.g. 32 if batch size is 32)
            # 1st dim: max number of timesteps within an example. For image data, this
            #     corresponds to the max number of frames.
            # 2nd dim: max number of shapes per frame.
            # 3rd dim: max number of vertices per shape.
            # 4th dim: number of coordinates per vertex (always 2 for 2D shapes.)
            dense_shape=tf.constant((1, 1, 2, 3, 2), dtype=tf.int64),
        )
    """

    def apply(self, transform, **kwargs):
        """
        Applies transformation to coordinates and canvas shape.

        Args:
            transform (Transform): Transform to apply.

        Returns:
            (Coordinates2DWithCounts): Transformed coordinates.
        """
        polygon_transform = processors.PolygonTransform()
        transformed_coordinates = polygon_transform(
            self.coordinates, transform.spatial_transform_matrix
        )
        return Coordinates2DWithCounts(
            coordinates=transformed_coordinates,
            canvas_shape=transform.canvas_shape,
            vertices_count=self.vertices_count,
        )

    def replace_coordinates(self, new_coords, canvas_shape=None):
        """
        Create a new object with a new coordinates tensor.

        Note that we expect shape and counts to remain the same.

        Args:
            new_coords (SparseTensor): Replacement tensor for coordinates.
            canvas_shape (Tensor): (optional) canvas_shape.
        """
        return Coordinates2DWithCounts(
            coordinates=new_coords,
            canvas_shape=canvas_shape or self.canvas_shape,
            vertices_count=self.vertices_count,
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/coordinates2d.py
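A runnable sketch (not part of the repository) that builds the triangle-plus-line example from the Coordinates2D docstring as an actual tf.SparseTensor, to make the [B, T, S, V, C] encoding concrete.

import tensorflow as tf

# Flattened (x, y) values: triangle (3 vertices), then line (2 vertices).
vertices = tf.constant(
    [1, 1, 3, 1, 2, 2,  # triangle
     5, 5, 10, 10],     # line
    dtype=tf.float32,
)
# One [batch, time, shape, vertex, coordinate] index per value.
indices = tf.constant(
    [[0, 0, 0, 0, 0], [0, 0, 0, 0, 1],
     [0, 0, 0, 1, 0], [0, 0, 0, 1, 1],
     [0, 0, 0, 2, 0], [0, 0, 0, 2, 1],
     [0, 0, 1, 0, 0], [0, 0, 1, 0, 1],
     [0, 0, 1, 1, 0], [0, 0, 1, 1, 1]],
    dtype=tf.int64,
)
first_frame_shapes = tf.SparseTensor(
    indices=indices,
    values=vertices,
    # 1 example, 1 timestep, 2 shapes, up to 3 vertices, 2 coordinates.
    dense_shape=tf.constant((1, 1, 2, 3, 2), dtype=tf.int64),
)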
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools

from parameterized import parameterized
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types.tensor_transforms import (
    map_and_stack,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.tensor_transforms import (
    sparsify_dense_coordinates,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.tensor_transforms import (
    vector_and_counts_to_sparse_tensor,
)


class TensorTransformsTest(tf.test.TestCase):
    def test_map_and_stack_0_rows(self):
        count = 0
        with self.session():
            indices = tf.range(count)
            out = map_and_stack(lambda i: [[0, i]], indices)
            self.assertEqual(None, out.shape)
            self.assertAllEqual([], out.eval())

    @parameterized.expand([[1, [[0, 0]]], [2, [[0, 0], [0, 1]]]])
    def test_map_and_stack_multiple_rows(self, count, expected):
        with self.session():
            indices = tf.range(count)
            out = map_and_stack(lambda i: [[0, i]], indices)
            self.assertAllEqual(expected, out.eval())

    def test_sparsifies_empty_vector(self):
        classes = tf.constant([])
        counts = tf.constant([], tf.int64)
        with self.session():
            sparse = vector_and_counts_to_sparse_tensor(classes, counts).eval()
            self.assertAllEqual([], sparse.values)
            self.assertEqual((0, 2), sparse.indices.shape)
            self.assertAllEqual((0, 0), sparse.dense_shape)

    @parameterized.expand(
        [
            [["lane"], [1], [[0, 0]]],
            [["lane", "pole"], [2], [[0, 0], [0, 1]]],
            [["lane", "pole"], [1, 1], [[0, 0], [1, 0]]],
            [["lane", "pole", "sign"], [1, 2], [[0, 0], [1, 0], [1, 1]]],
            [["lane", "pole", "sign"], [2, 1], [[0, 0], [0, 1], [1, 0]]],
        ]
    )
    def test_sparsifies_vector(self, classes, counts, expected_indices):
        expected_total_elements = len(counts)
        expected_max_elements = max(counts)
        classes = tf.constant(classes)
        counts = tf.constant(counts, tf.int64)
        with self.session():
            sparse = vector_and_counts_to_sparse_tensor(classes, counts).eval()
            self.assertAllEqual(classes, sparse.values)
            self.assertAllEqual(expected_indices, sparse.indices)
            self.assertEqual(expected_total_elements, sparse.dense_shape[0])
            self.assertEqual(expected_max_elements, sparse.dense_shape[1])

    def test_sparsifies_empty_coordinates(self):
        with self.session() as sess:
            actual = sparsify_dense_coordinates(
                tf.constant([], dtype=tf.float32), tf.constant([], dtype=tf.int64)
            )
            actual = sess.run(actual)
            self.assertAllEqual([], actual.values)
            self.assertAllEqual([0, 3], actual.indices.shape)
            self.assertAllEqual([0, 0, 0], actual.dense_shape)

    @parameterized.expand(
        [
            [
                # Single point.
                [[7.0, 7.0]],
                [1],
                [[0, 0, 0], [0, 0, 1]],
                [1, 1, 2],
            ],
            [
                # Two points.
                [
                    # First
                    [7.0, 7.0],
                    # Second
                    [42.0, 7.0],
                ],
                [1, 1],
                [
                    # First indices
                    [0, 0, 0],
                    [0, 0, 1],
                    # Second indices
                    [1, 0, 0],
                    [1, 0, 1],
                ],
                [2, 1, 2],
            ],
            [
                # Single line.
                [[0.0, 0.0], [7.0, 7.0]],
                [2],
                [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1]],
                [1, 2, 2],
            ],
            [
                # Two lines.
                [
                    # First
                    [0.0, 0.0],
                    [7.0, 7.0],
                    # Second
                    [0.0, 7.0],
                    [7.0, 0.0],
                ],
                [2, 2],
                [
                    # First
                    [0, 0, 0],
                    [0, 0, 1],
                    [0, 1, 0],
                    [0, 1, 1],
                    # Second
                    [1, 0, 0],
                    [1, 0, 1],
                    [1, 1, 0],
                    [1, 1, 1],
                ],
                [2, 2, 2],
            ],
            [
                # Point and a line
                [
                    # Point
                    [7.0, 7.0],
                    # Line
                    [0.0, 0.0],
                    [7.0, 7.0],
                ],
                [1, 2],
                [
                    # Point indices
                    [0, 0, 0],
                    [0, 0, 1],
                    # Line indices
                    [1, 0, 0],
                    [1, 0, 1],
                    [1, 1, 0],
                    [1, 1, 1],
                ],
                [2, 2, 2],
            ],
            [
                # Line and a point
                [
                    # Line
                    [0.0, 0.0],
                    [7.0, 7.0],
                    # Point
                    [7.0, 7.0],
                ],
                [2, 1],
                [
                    # Line indices
                    [0, 0, 0],
                    [0, 0, 1],
                    [0, 1, 0],
                    [0, 1, 1],
                    # Point indices
                    [1, 0, 0],
                    [1, 0, 1],
                ],
                [2, 2, 2],
            ],
            [
                # Point, Line, Point, Line
                [
                    # First point
                    [1.0, 2.0],
                    # First line
                    [1.0, 2.0],
                    [3.0, 4.0],
                    # Second point
                    [2.0, 1.0],
                    # Second line
                    [4.0, 3.0],
                    [2.0, 1.0],
                ],
                [1, 2, 1, 2],
                [
                    # First point indices
                    [0, 0, 0],
                    [0, 0, 1],
                    # First line indices
                    [1, 0, 0],
                    [1, 0, 1],
                    [1, 1, 0],
                    [1, 1, 1],
                    # Second point indices
                    [2, 0, 0],
                    [2, 0, 1],
                    # Second line indices
                    [3, 0, 0],
                    [3, 0, 1],
                    [3, 1, 0],
                    [3, 1, 1],
                ],
                [4, 2, 2],
            ],
        ]
    )
    def test_sparsifies_coordinates(
        self,
        dense_coordinates,
        vertex_counts_per_polygon,
        expected_indices,
        expected_dense_shape,
    ):
        expected_values = list(itertools.chain.from_iterable(dense_coordinates))
        dense_coordinates = tf.constant(dense_coordinates)
        vertex_counts_per_polygon = tf.constant(
            vertex_counts_per_polygon, dtype=tf.int64
        )
        with self.session() as sess:
            actual = sparsify_dense_coordinates(
                dense_coordinates, vertex_counts_per_polygon
            )
            actual = sess.run(actual)
            self.assertAllEqual(expected_values, actual.values)
            self.assertAllEqual(expected_indices, actual.indices)
            self.assertAllEqual(expected_dense_shape, actual.dense_shape)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/tensor_transforms_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for Images2D."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from parameterized import parameterized
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    test_fixtures as fixtures,
)


class Images2DTest(tf.test.TestCase):
    @parameterized.expand([[1, 1], [1, 2], [2, 1], [2, 2]])
    def test_transform_succeeds(self, example_count, frames_per_example):
        with self.session() as sess:
            images = fixtures.make_images2d(
                example_count=example_count,
                frames_per_example=frames_per_example,
                height=604,
                width=960,
            )
            transformation = fixtures.make_identity_transform(
                count=example_count,
                height=604,
                width=960,
                timesteps=frames_per_example,
            )
            transformed_images = images.apply(transformation)
            original, transformed = sess.run([images, transformed_images])
            self.assertAllClose(original, transformed)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/images2d_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Structure for representing recording session and temporal ordering."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

"""
Information related to the recording session from which the example was generated.

Args:
    uuid (tf.Tensor): Tensor of type tf.string representing the unique identifier of a
        recording session.
    camera_name (tf.Tensor): Tensor of type tf.string representing the name of the camera
        with which the session was recorded.
    frame_number (tf.Tensor): Tensor of type tf.int32 representing the position of a frame
        within a recording session (~= video). This is equivalent to the timestep within a
        sequence. The higher the frame number, the newer the example/frame.
"""
Session = collections.namedtuple("Session", ["uuid", "camera_name", "frame_number"])
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/session.py
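A short sketch (not part of the repository) showing how a Session is constructed; this mirrors how test_fixtures.make_example() populates the FEATURE_SESSION slot.

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types.session import Session

session = Session(
    uuid=tf.constant("session_uuid"),
    camera_name=tf.constant("camera_name"),
    frame_number=tf.constant(0),
)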
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Types used to compose Examples."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import (
    Bbox2DLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import (
    filter_bbox_label_based_on_minimum_dims,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
    Coordinates2D,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
    Coordinates2DWithCounts,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.images2d import (
    Images2D,
    LabelledImages2D,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.images2d_reference import (
    Images2DReference,
    LabelledImages2DReference,
    LabelledImages2DReferenceVal,
    set_augmentations,
    set_augmentations_val,
    set_auto_resize,
    set_h_tensor,
    set_h_tensor_val,
    set_image_channels,
    set_image_depth,
    set_max_side,
    set_min_side,
    set_w_tensor,
    set_w_tensor_val,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.legacy import (
    empty_polygon_label,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.legacy import PolygonLabel
from nvidia_tao_tf1.blocks.multi_source_loader.types.partition_label import (
    PartitionLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.polygon2d_label import (
    Polygon2DLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.process_markers import (
    map_markers_to_orientations,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.process_markers import (
    map_orientation_to_markers,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    FEATURE_CAMERA,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    FEATURE_SESSION,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_DEPTH_DENSE_MAP,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_DEPTH_FREESPACE,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_FREESPACE_REGRESSION,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_FREESPACE_SEGMENTATION,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_MAP,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_OBJECT,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_PANOPTIC_SEGMENTATION,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_PATH,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    SequenceExample,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.session import Session
from nvidia_tao_tf1.blocks.multi_source_loader.types.tensor_transforms import (
    map_and_stack,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.tensor_transforms import (
    sparsify_dense_coordinates,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.tensor_transforms import (
    vector_and_counts_to_sparse_tensor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.transformed_example import (
    TransformedExample,
)
from nvidia_tao_tf1.core.types import Canvas2D
from nvidia_tao_tf1.core.types import Example

__all__ = (
    "Bbox2DLabel",
    "Canvas2D",
    "Coordinates2D",
    "Coordinates2DWithCounts",
    "empty_polygon_label",
    "Example",
    "FEATURE_CAMERA",
    "FEATURE_SESSION",
    "filter_bbox_label_based_on_minimum_dims",
    "Images2D",
    "Images2DReference",
    "LabelledImages2D",
    "LabelledImages2DReference",
    "LabelledImages2DReferenceVal",
    "set_image_channels",
    "set_image_depth",
    "LABEL_DEPTH_DENSE_MAP",
    "LABEL_DEPTH_FREESPACE",
    "LABEL_FREESPACE_REGRESSION",
    "LABEL_FREESPACE_SEGMENTATION",
    "LABEL_MAP",
    "LABEL_OBJECT",
    "LABEL_PANOPTIC_SEGMENTATION",
    "LABEL_PATH",
    "map_and_stack",
    "map_markers_to_orientations",
    "map_orientation_to_markers",
    "PartitionLabel",
    "Polygon2DLabel",
    "PolygonLabel",
    "SequenceExample",
    "Session",
    "set_augmentations",
    "set_augmentations_val",
    "set_auto_resize",
    "set_max_side",
    "set_min_side",
    "set_h_tensor",
    "set_h_tensor_val",
    "set_w_tensor",
    "set_w_tensor_val",
    "sparsify_dense_coordinates",
    "test_fixtures",
    "TransformedExample",
    "vector_and_counts_to_sparse_tensor",
)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/__init__.py
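A one-liner sketch (not part of the repository): because __init__.py re-exports the types listed in __all__, callers can import from the package root instead of the individual modules.

from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Coordinates2DWithCounts,
    Images2D,
    SequenceExample,
)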
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the TransformedExample datastructure."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple

from mock import Mock, patch
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    SequenceExample,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.transformed_example import (
    TransformedExample,
)

TestInstance = namedtuple("TestInstance", ["apply"])
TestLabel = namedtuple("TestLabel", ["apply"])


class TransformedExampleTest(tf.test.TestCase):
    def test_apply_recurses_instances(self):
        with patch.object(TestInstance, "apply") as mocked_apply:
            example = SequenceExample(
                instances={"test": TestInstance(apply="test")}, labels={}
            )
            transformation = Mock()
            transformed = TransformedExample(transformation, example)
            mocked_apply.assert_not_called()
            transformed()
            mocked_apply.assert_called_once_with(transformation)

    def test_apply_recurses_labels(self):
        with patch.object(TestLabel, "apply") as mocked_apply:
            example = SequenceExample(
                instances={}, labels={"test": TestLabel(apply="test")}
            )
            transformation = Mock()
            transformed = TransformedExample(transformation, example)
            mocked_apply.assert_not_called()
            transformed()
            mocked_apply.assert_called_once_with(transformation)

    def test_apply_recurses_lists(self):
        with patch.object(TestLabel, "apply") as mocked_apply:
            example = SequenceExample(
                instances={}, labels={"test": [TestLabel(apply="test")]}
            )
            transformation = Mock()
            transformed = TransformedExample(transformation, example)
            mocked_apply.assert_not_called()
            transformed()
            mocked_apply.assert_called_once_with(transformation)

    def test_apply_recurses_sets(self):
        with patch.object(TestLabel, "apply") as mocked_apply:
            example = SequenceExample(
                instances={}, labels={"test": set([TestLabel(apply="test")])}
            )
            transformation = Mock()
            transformed = TransformedExample(transformation, example)
            mocked_apply.assert_not_called()
            transformed()
            mocked_apply.assert_called_once_with(transformation)

    def test_apply_recurses_dicts(self):
        with patch.object(TestLabel, "apply") as mocked_apply:
            example = SequenceExample(
                instances={}, labels={"test": {"child": TestLabel(apply="test")}}
            )
            transformation = Mock()
            transformed = TransformedExample(transformation, example)
            mocked_apply.assert_not_called()
            transformed()
            mocked_apply.assert_called_once_with(transformation)

    def test_apply_recurses_namedtuples_without_apply(self):
        ApplylessNamedtuple = namedtuple("ApplylessNamedtuple", ["instance"])
        with patch.object(TestLabel, "apply") as mocked_apply:
            example = SequenceExample(
                instances={},
                labels={"test": ApplylessNamedtuple(instance=TestLabel(apply="test"))},
            )
            transformation = Mock()
            transformed = TransformedExample(transformation, example)
            mocked_apply.assert_not_called()
            transformed()
            mocked_apply.assert_called_once_with(transformation)

    def test_does_not_recurse_into_namedtuples_with_apply(self):
        # We consider namedtuples with an "apply" method to be a leaf node and stop
        # recursive application of transformations when we encounter one.
        TestLabelWithInstance = namedtuple(
            "TestLabelWithInstance", ["apply", "instance"]
        )
        with patch.object(TestLabelWithInstance, "apply") as mocked_apply, patch.object(
            TestInstance, "apply"
        ) as mocked_instance_apply:
            example = SequenceExample(
                instances={},
                labels={
                    "test": {
                        "child": TestLabelWithInstance(
                            apply="test", instance=TestInstance(apply="test")
                        )
                    }
                },
            )
            transformation = Mock()
            transformed = TransformedExample(transformation, example)
            mocked_apply.assert_not_called()
            mocked_instance_apply.assert_not_called()
            transformed()
            mocked_apply.assert_called_once_with(transformation)
            mocked_instance_apply.assert_not_called()
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/transformed_example_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Simple named tuple for partition label."""

from typing import NamedTuple


class PartitionLabel(NamedTuple):
    """Label for partition."""

    value: str
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/partition_label.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the SequenceExample datastructure."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    test_fixtures as fixtures,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    FEATURE_CAMERA,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_MAP,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    SequenceExample,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    TransformedExample,
)


class SequenceExampleTest(tf.test.TestCase):
    def test_transform_returns_transformed_example(self):
        example = fixtures.make_example(height=604, width=960)
        transformation = fixtures.make_identity_transform(
            count=2, height=604, width=960
        )
        transformed = example.transform(transformation)
        self.assertEqual(TransformedExample, type(transformed))

    def test_call_applies_identity_transform(self):
        example = fixtures.make_example(height=604, width=960)
        transformation = fixtures.make_identity_transform(
            count=1, height=604, width=960
        )
        transformed = example.transform(transformation)
        with self.session() as sess:
            example, transformed = sess.run([example, transformed()])
            self.assertAllClose(
                example.labels[LABEL_MAP].vertices,
                transformed.labels[LABEL_MAP].vertices,
            )
            self.assertAllClose(
                example.instances[FEATURE_CAMERA].images,
                transformed.instances[FEATURE_CAMERA].images,
            )

    def test_call_returns_sequence_example(self):
        example = fixtures.make_example(height=604, width=960)
        transformation = fixtures.make_identity_transform(
            count=1, height=604, width=960
        )
        transformed = example.transform(transformation)
        example = transformed()
        self.assertEqual(SequenceExample, type(example))
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/sequence_example_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Functions for creating test fixtures."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import random

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
    Coordinates2DWithCounts,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.images2d import Images2D
from nvidia_tao_tf1.blocks.multi_source_loader.types.images2d_reference import (
    Images2DReference,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.polygon2d_label import (
    Polygon2DLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    FEATURE_CAMERA,
    FEATURE_SESSION,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    LABEL_MAP,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    SequenceExample,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.session import Session
from nvidia_tao_tf1.core.types import Canvas2D, Transform


def make_identity_transform(count, height, width, timesteps=1):
    """Return a batch of identity Transforms for count examples."""
    return Transform(
        canvas_shape=make_canvas2d(count, height, width, timesteps=timesteps),
        color_transform_matrix=tf.stack(
            [tf.eye(4, dtype=tf.float32) for _ in range(count)]
        ),
        spatial_transform_matrix=tf.stack(
            [tf.eye(3, dtype=tf.float32) for _ in range(count)]
        ),
    )


def make_canvas2d(count, height, width, timesteps=1):
    """Return a batch of Canvas2D for count Examples.

    Args:
        count (int or tensor): Number of examples (batch size).
        height (int): Height of the canvas in pixels.
        width (int): Width of the canvas in pixels.
        timesteps (int): Sequence dimension size.

    Returns:
        Canvas2D.
    """
    height_shape = [height]
    width_shape = [width]
    if timesteps:
        height_shape = [timesteps] + height_shape
        width_shape = [timesteps] + width_shape
    add_count = False
    if type(count) == int:
        if count > 0:
            add_count = True
    elif count is not None:
        add_count = True
    if add_count:
        height_shape = [count] + height_shape
        width_shape = [count] + width_shape
    return Canvas2D(height=tf.zeros(height_shape), width=tf.zeros(width_shape))


def make_coordinates2d(
    shapes_per_frame, height, width, coordinates_per_polygon=3, coordinate_values=None
):
    """
    Create a batch of sparse labels.

    Each example can contain a different number of frames.

    Args:
        shapes_per_frame (list[list[int]]): List of lists containing the number of shapes
            to include in each frame. E.g. [[1, 2], [4, 4, 4]] - Two examples, where the
            first one contains 2 frames (first has 1 shape, second 2) and the second
            example contains 3 frames (each with 4 shapes).
        height (int): Height of the canvas on which shapes reside.
        width (int): Width of the canvas on which shapes reside.
        coordinates_per_polygon (int): Number of the coordinates in each polygon.
        coordinate_values (list): List containing values of coordinates.

    Returns:
        (Coordinates2DWithCounts): Coordinates generated based on the passed in arguments.
    """
    if shapes_per_frame is None:
        shapes_per_frame = [[1]]
    indices = []
    coordinates = []
    example_count = len(shapes_per_frame)
    max_frame_count = 0
    max_shape_count = 0
    coordinate_counts = []
    for example_index in range(example_count):
        example_frame_shape_counts = shapes_per_frame[example_index]
        frame_count = len(example_frame_shape_counts)
        max_frame_count = max(max_frame_count, frame_count)
        for frame_index in range(frame_count):
            frame_shape_count = example_frame_shape_counts[frame_index]
            max_shape_count = max(frame_shape_count, max_shape_count)
            for shape_index in range(frame_shape_count):
                coordinate_counts.append(coordinates_per_polygon)
                for vertex_index in range(coordinates_per_polygon):
                    coordinates.append(
                        [
                            float(random.randint(0, width) / 2),
                            float(random.randint(0, height)) / 2,
                        ]
                    )
                    for coordinate_index in [0, 1]:
                        indices.append(
                            [
                                example_index,
                                frame_index,
                                shape_index,
                                vertex_index,
                                coordinate_index,
                            ]
                        )
    if coordinate_values is not None:
        assert len(coordinate_values) == len(coordinates)
        coordinates = coordinate_values
    dense_coordinates = tf.constant(coordinates, dtype=tf.float32)
    sparse_indices = tf.constant(indices, dtype=tf.int64)
    dense_shape = tf.constant(
        (
            example_count,
            max_frame_count,
            max_shape_count,
            coordinates_per_polygon,
            2,  # 2D: (x, y)
        ),
        dtype=tf.int64,
    )
    sparse_coordinates = tf.SparseTensor(
        indices=sparse_indices,
        values=tf.reshape(dense_coordinates, (-1,)),
        dense_shape=dense_shape,
    )
    vertices_count = tf.SparseTensor(
        indices=tf.constant(
            [[0, 0, j] for j in range(len(coordinate_counts))], dtype=tf.int64
        ),
        values=tf.constant(coordinate_counts),
        dense_shape=tf.constant([1, 1, len(coordinate_counts)], dtype=tf.int64),
    )
    return Coordinates2DWithCounts(
        coordinates=sparse_coordinates,
        canvas_shape=make_canvas2d(
            example_count, height, width, timesteps=max_frame_count
        ),
        vertices_count=vertices_count,
    )


def make_single_coordinates2d(shape_count, height, width, coordinates_per_polygon=3):
    """
    Create a single sparse label.

    Args:
        shape_count (int): Number of shapes to include.
        height (int): Height of the canvas on which shapes reside.
        width (int): Width of the canvas on which shapes reside.
        coordinates_per_polygon (int): Number of the coordinates in each polygon.

    Returns:
        (Coordinates2DWithCounts): Coordinates generated based on the passed in arguments.
    """
    indices = []
    coordinates = []
    coordinate_counts = []
    for shape_index in range(shape_count):
        coordinate_counts.append(coordinates_per_polygon)
        for vertex_index in range(coordinates_per_polygon):
            coordinates.append(
                [
                    float(random.randint(0, width) / 2),
                    float(random.randint(0, height)) / 2,
                ]
            )
            for coordinate_index in [0, 1]:
                indices.append([shape_index, vertex_index, coordinate_index])
    dense_coordinates = tf.constant(coordinates, dtype=tf.float32)
    sparse_indices = tf.constant(indices, dtype=tf.int64)
    dense_shape = tf.constant(
        (shape_count, coordinates_per_polygon, 2), dtype=tf.int64  # 2D: (x, y)
    )
    sparse_coordinates = tf.SparseTensor(
        indices=sparse_indices,
        values=tf.reshape(dense_coordinates, (-1,)),
        dense_shape=dense_shape,
    )
    vertices_count = tf.SparseTensor(
        indices=tf.constant(
            [[0, j] for j in range(len(coordinate_counts))], dtype=tf.int64
        ),
        values=tf.constant(coordinate_counts),
        dense_shape=tf.constant([1, len(coordinate_counts)], dtype=tf.int64),
    )
    return Coordinates2DWithCounts(
        coordinates=sparse_coordinates,
        canvas_shape=make_canvas2d(0, height, width),
        vertices_count=vertices_count,
    )


def make_tags(tags):
    """
    Create a sparse tensor representing tags associated with shapes.

    A shape can have a variable number of tags associated with it. A tag can represent
    any value, like a class or attribute.

    Args:
        tags (list[list[list[list[T]]]): List of tags of type T that need to be
            convertible to tf.DType. The depth into this structure encodes (from
            outermost to innermost):
            0: Examples
            1: Frames within an example.
            2: Shapes within a frame.
            3: Tags associated with a shape.

    Returns:
        (tf.SparseTensor): A tensor encoding the passed in tags. The fields of this
            sparse tensor follow the following encoding:
            values: The tags flattened into a 1D tensor.
            indices: A dense tensor of shape [E, F, S, T] and type tf.int64 where:
                E = Example
                F = Frame
                S = Shape that the classes are associated with.
                T = Tags.
            dense_shape: A dense tensor of shape (E, MF, MS, MT) and type T where
                E: Example count
                MF: Maximum frame count in indices.
                MS: Maximum shape count in indices.
                MT: Maximum tag count in indices.
    """
    indices = []
    values = []
    max_frame_count = 0
    max_shape_count = 0
    max_tag_count = 0
    example_count = len(tags)
    for example_index in range(example_count):
        frames = tags[example_index]
        frame_count = len(frames)
        max_frame_count = max(frame_count, max_frame_count)
        for frame_index in range(frame_count):
            shapes = frames[frame_index]
            shape_count = len(shapes)
            max_shape_count = max(shape_count, max_shape_count)
            for shape_index in range(shape_count):
                shape_tags = shapes[shape_index]
                tag_count = len(shape_tags)
                max_tag_count = max(tag_count, max_tag_count)
                values.extend(shape_tags)
                for tag_index in range(tag_count):
                    indices.append([example_index, frame_index, shape_index, tag_index])
    values = tf.constant(values)
    indices = tf.constant(indices, dtype=tf.int64)
    dense_shape = tf.constant(
        (example_count, max_frame_count, max_shape_count, max_tag_count),
        dtype=tf.int64,
    )
    return tf.SparseTensor(
        indices=indices, values=tf.reshape(values, (-1,)), dense_shape=dense_shape
    )


def make_single_tags(tags):
    """
    Create a sparse tensor representing tags associated with shapes.

    A shape can have a variable number of tags associated with it. A tag can represent
    any value, like a class or attribute.

    Args:
        tags (list[list[T]]): List of tags of type T that need to be convertible to
            tf.DType. The depth into this structure encodes (from outermost to
            innermost):
            0: Shapes within a frame.
            1: Tags associated with a shape.

    Returns:
        (tf.SparseTensor): A tensor encoding the passed in tags. The fields of this
            sparse tensor follow the following encoding:
            values: The tags flattened into a 1D tensor.
            indices: A dense tensor of shape [S, T] and type tf.int64 where:
                S = Shape that the classes are associated with.
                T = Tags.
            dense_shape: A dense tensor of shape (MS, MT) and type T where
                MS: Maximum shape count in indices.
                MT: Maximum tag count in indices.
    """
    indices = []
    values = []
    max_frame_count = 0
    max_shape_count = 0
    max_tag_count = 0
    frame_count = len(tags)
    max_frame_count = max(frame_count, max_frame_count)
    for frame_index in range(frame_count):
        shapes = tags[frame_index]
        shape_count = len(shapes)
        max_shape_count = max(shape_count, max_shape_count)
        for shape_index in range(shape_count):
            shape_tags = shapes[shape_index]
            tag_count = len(shape_tags)
            max_tag_count = max(tag_count, max_tag_count)
            values.extend(shape_tags)
            for tag_index in range(tag_count):
                indices.append([shape_index, tag_index])
    values = tf.constant(values)
    indices = tf.constant(indices, dtype=tf.int64)
    dense_shape = tf.constant((max_shape_count, max_tag_count), dtype=tf.int64)
    return tf.SparseTensor(
        indices=indices, values=tf.reshape(values, (-1,)), dense_shape=dense_shape
    )


def make_polygon2d_label(
    shapes_per_frame,
    shape_classes,
    shape_attributes,
    height,
    width,
    coordinates_per_polygon=3,
    coordinate_values=None,
):
    """
    Make a Polygon2DLabel.

    Args:
        shapes_per_frame (list[list[int]]): List of lists containing the number of shapes
            to include in each frame. E.g. [[1, 2], [4, 4, 4]] - Two examples, where the
            first one contains 2 frames (first has 1 shape, second 2) and the second
            example contains 3 frames (each with 4 shapes).
        shape_classes (list[T]): Classes of type T associated with each shape in
            shapes_per_frame. T needs to be a type convertible to tf.DType.
        shape_attributes (list[T]): Attributes of type T associated with each shape in
            shapes_per_frame. T needs to be a type convertible to tf.DType.
        height (int): Height of the canvas on which shapes reside.
        width (int): Width of the canvas on which shapes reside.
        coordinates_per_polygon (int): Number of the coordinates in each polygon.
        coordinate_values (list[float]): Coordinate values of the polygon of type float.

    Returns:
        (Polygon2DLabel): A label with polygons and their associated classes and
            attributes.
    """
    example_count = len(shapes_per_frame)
    classes = []
    attributes = []
    for example_index in range(example_count):
        example_frame_shapes = shapes_per_frame[example_index]
        example_frame_count = len(example_frame_shapes)
        example_classes = []
        example_attributes = []
        for frame_index in range(example_frame_count):
            frame_classes = []
            frame_attributes = []
            shape_count = example_frame_shapes[frame_index]
            for _ in range(shape_count):
                frame_classes.append(shape_classes)
                frame_attributes.append(shape_attributes)
            example_classes.append(frame_classes)
            example_attributes.append(frame_attributes)
        classes.append(example_classes)
        attributes.append(example_attributes)
    return Polygon2DLabel(
        vertices=make_coordinates2d(
            shapes_per_frame=shapes_per_frame,
            height=height,
            width=width,
            coordinates_per_polygon=coordinates_per_polygon,
            coordinate_values=coordinate_values,
        ),
        classes=make_tags(classes),
        attributes=make_tags(attributes),
    )


def make_images2d(example_count, frames_per_example, height, width):
    """
    Create a batch of Images2D.

    Args:
        example_count (int or tensor): Number of examples (batch size).
        frames_per_example (int): Number of frames within each example.
        height (int): Height of the image in pixels.
        width (int): Width of the image in pixels.

    Returns:
        (Images2D): Images where the images property has a tf.Tensor of type tf.float32
            and shape [example_count, frames_per_example, 3, height, width].
    """
    shape = []
    if type(example_count) == int:
        if example_count > 0:
            shape.append(example_count)
    elif example_count is not None:
        shape.append(example_count)
    if frames_per_example:
        shape.append(frames_per_example)
    shape.extend([3, height, width])
    return Images2D(
        images=tf.ones(shape, tf.float32),
        canvas_shape=make_canvas2d(example_count, height, width, frames_per_example),
    )


def make_images2d_reference(example_count, frames_per_example, height, width):
    """
    Create a batch of Images2DReferences.

    Args:
        example_count (int or tensor): Number of examples (batch size).
        frames_per_example (int): Number of frames within each example.
        height (int): Height of the image in pixels.
        width (int): Width of the image in pixels.

    Returns:
        (Images2DReference): Image references where the images property has a tf.Tensor
            of type tf.float32 and shape
            [example_count, frames_per_example, 3, height, width].
    """
    shape = []
    if type(example_count) == int:
        if example_count > 0:
            shape.append(example_count)
    elif example_count is not None:
        shape.append(example_count)
    if frames_per_example:
        shape.append(frames_per_example)
    path = tf.constant("test_path", dtype=tf.string)
    extension = tf.constant(".fp16", dtype=tf.string)
    if shape:
        path = tf.broadcast_to(path, shape)
        extension = tf.broadcast_to(extension, shape)
    return Images2DReference(
        path=path,
        extension=extension,
        canvas_shape=make_canvas2d(example_count, height, width, frames_per_example),
        input_height=tf.constant(height, dtype=tf.int32),
        input_width=tf.constant(width, dtype=tf.int32),
    )


def make_example(
    height,
    width,
    example_count=1,
    shapes_per_frame=None,
    coordinates_per_polygon=3,
    coordinate_values=None,
    use_images2d_reference=False,
):
    """
    Create a batch of SequenceExamples.

    Args:
        height (int): Height of the images and labels to create.
        width (int): Width of the images and labels to create.
        example_count (int or tensor): Number of examples (batch size).
        shapes_per_frame (list[list[int]]): List of lists containing the number of shapes
            to include in each frame. E.g. [[1, 2], [4, 4, 4]] - Two examples, where the
            first one contains 2 frames (first has 1 shape, second 2) and the second
            example contains 3 frames (each with 4 shapes).
        coordinates_per_polygon (int): Number of the coordinates in each polygon.
        coordinate_values (list): List containing values of coordinates.
        use_images2d_reference (boolean): If True, construct examples with
            Images2DReference. If False, construct with Images2D.

    Returns:
        (SequenceExample): Sequence examples configured based on parameters.
    """
    image_func = make_images2d_reference if use_images2d_reference else make_images2d
    return SequenceExample(
        instances={
            FEATURE_CAMERA: image_func(
                example_count=example_count,
                frames_per_example=1,
                height=height,
                width=width,
            ),
            FEATURE_SESSION: Session(
                uuid=tf.constant("session_uuid"),
                camera_name=tf.constant("camera_name"),
                frame_number=tf.constant(0),
            ),
        },
        labels={
            LABEL_MAP: Polygon2DLabel(
                vertices=make_coordinates2d(
                    shapes_per_frame=shapes_per_frame,
                    height=height,
                    width=width,
                    coordinates_per_polygon=coordinates_per_polygon,
                    coordinate_values=coordinate_values,
                ),
                classes=make_tags([[[["lane"]]]]),
                attributes=make_tags([[[["left", "exit"]]]]),
            )
        },
    )


def make_example_3d(height, width, label_name=LABEL_MAP):
    """
    Create a SequenceExample that does not have a time and batch dimension.

    Args:
        height (int): Height of the images and labels to create.
        width (int): Width of the images and labels to create.
        label_name (string): Name of the Polygon2DLabel to create.

    Returns:
        (SequenceExample): Sequence examples configured based on parameters.
    """
    return SequenceExample(
        instances={
            FEATURE_CAMERA: make_images2d(
                example_count=0, frames_per_example=0, height=height, width=width
            ),
            FEATURE_SESSION: Session(
                uuid=tf.constant("session_uuid"),
                camera_name=tf.constant("camera_name"),
                frame_number=tf.constant(0),
            ),
        },
        labels={
            label_name: Polygon2DLabel(
                vertices=make_single_coordinates2d(3, height=height, width=width),
                classes=make_single_tags([["lane", "lane", "lane"]]),
                attributes=make_single_tags([[["left", "exit", "entry"]]]),
            )
        },
    )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/test_fixtures.py
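A usage sketch (not part of the repository) that mirrors sequence_example_test.py: build a fixture example, attach an identity transform, and materialize the transformed SequenceExample by calling the TransformedExample.

from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures as fixtures

example = fixtures.make_example(height=604, width=960)
transformation = fixtures.make_identity_transform(count=1, height=604, width=960)
transformed = example.transform(transformation)  # a TransformedExample
materialized = transformed()                     # back to a SequenceExample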
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Example whose transformation has been delayed."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple


class TransformedExample(
    namedtuple("TransformedExample", ["transformation", "example"])
):
    """Container for an example and the transformation that can be applied to it.

    Args:
        transformation (Transformation): Transformation which will be applied by
            __call__.
        example (SequenceExample): Example that the transformation applies to.
    """

    def __call__(self, **kwargs):
        """Return original type after applying transformations."""
        return self._apply_recursive(self.example, self.transformation, **kwargs)

    def _apply_recursive(self, value, transformation, **kwargs):
        """
        Apply transformations to a tf.data.Dataset compatible value.

        * Transformation will be applied recursively to members of container types
          (dict, list, namedtuple).
        * Transformations are applied to types that have an apply method.
        """

        def _is_namedtuple(value):
            """Return true if value is a namedtuple."""
            return isinstance(value, tuple) and hasattr(value, "_fields")

        # Call apply only if implemented on a namedtuple.
        apply_op = getattr(value, "apply", None)
        if _is_namedtuple(value) and (apply_op is not None and callable(apply_op)):
            return apply_op(transformation, **kwargs)
        if isinstance(value, (list, set)):
            return [self._apply_recursive(v, transformation, **kwargs) for v in value]
        if isinstance(value, dict):
            return {
                k: self._apply_recursive(v, transformation, **kwargs)
                for (k, v) in value.items()
            }
        if _is_namedtuple(value):
            return value._make(
                [
                    self._apply_recursive(field, transformation, **kwargs)
                    for field in value._asdict().values()
                ]
            )
        # Stop recursion - unknown non-collection types are treated as leaf nodes.
        return value
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/transformed_example.py
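A toy sketch (not part of the repository) of the recursion rule: namedtuples that define apply() are leaves, while containers are traversed. ScaleLabel is a hypothetical label type, and the integer transformation is a stand-in for a real Transformation object since only the toy apply() consumes it.

from collections import namedtuple

from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    SequenceExample,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.transformed_example import (
    TransformedExample,
)


class ScaleLabel(namedtuple("ScaleLabel", ["value"])):
    # A namedtuple with apply() is treated as a leaf; apply() receives the transformation.
    def apply(self, transformation, **kwargs):
        return ScaleLabel(value=self.value * transformation)


example = SequenceExample(instances={}, labels={"test": [ScaleLabel(value=2)]})
transformed = TransformedExample(transformation=10, example=example)
# Calling the TransformedExample recurses through labels and applies the leaf:
print(transformed())  # labels["test"] -> [ScaleLabel(value=20)]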
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for Polygon2DLabel."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    test_fixtures as fixtures,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
    Coordinates2DWithCounts,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.polygon2d_label import (
    Polygon2DLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sparse_tensor_builder import (
    SparseTensorBuilder,
)


###
# Convenience shorthands for building out the coordinate tensors.
###
class C(SparseTensorBuilder):
    """Coordinates."""

    pass


class Poly(SparseTensorBuilder):
    """Polygon/Polyline."""

    pass


class Frame(SparseTensorBuilder):
    """Frame."""

    pass


class Timestep(SparseTensorBuilder):
    """Timestep."""

    pass


class Batch(SparseTensorBuilder):
    """Batch."""

    pass


class Label(SparseTensorBuilder):
    """Class Label."""

    pass


###


def _get_label(
    label_builder, label_classes_builder, label_vertices_counts, label_attributes
):
    sparse_example = label_builder.build(val_type=tf.float32)
    sparse_classes = label_classes_builder.build(val_type=tf.int32)
    sparse_counts = label_vertices_counts.build(val_type=tf.int32)
    sparse_attributes = label_attributes.build(val_type=tf.int32)

    coordinates = Coordinates2DWithCounts(
        coordinates=sparse_example,
        canvas_shape=tf.zeros(1),
        vertices_count=sparse_counts,
    )

    label = Polygon2DLabel(
        vertices=coordinates, classes=sparse_classes, attributes=sparse_attributes
    )
    return label


class Polygon2DLabelTest(tf.test.TestCase):
    def test_reshape_to_4d(self):
        # Want sparse coordinates and classes to overlap but also have coordinates
        # without classes and classes without coordinates.
        coordinates = fixtures.make_coordinates2d(
            # Examples
            [
                # Frames
                [0],
                [
                    # Shapes
                    0,
                    1,
                    2,
                ],
            ],
            100,
            200,
        )

        classes = fixtures.make_tags(
            # Examples
            [[[[1]]], [[[0], [1]], [[0], [1]], [[0], [1]]]]  # Frames  # Shapes
        )

        attributes = fixtures.make_tags(
            [
                # Examples
                [[[1]]],
                [
                    # Frames
                    [[0, 1, 3], [10]],  # Shapes
                    [[0, 1, 3], [10]],  # Shapes
                    [[0, 1, 3], [10]],  # Shapes
                ],
            ]
        )

        label = Polygon2DLabel(
            vertices=coordinates, classes=classes, attributes=attributes
        )  # classes and attributes

        with self.cached_session():
            reshaped = label.compress_frame_dimension()
            # TODO(ehall): verify that this is preserving image, class index
            # correlations between polygons w/sparse generator from other branch.
            self.assertAllEqual(reshaped.vertices.coordinates.dense_shape, [6, 2, 3, 2])
            self.assertAllEqual(reshaped.classes.dense_shape, [6, 2, 1])
            self.assertAllEqual(reshaped.attributes.dense_shape, [6, 2, 3])

    # Verify that empty attributes are handled properly during compression and do not
    # raise errors.
    def test_handling_of_empty_attributes(self):
        coordinates = fixtures.make_coordinates2d(
            # Examples
            [
                # Frames
                [0],
                [
                    # Shapes
                    0,
                    1,
                    2,
                ],
            ],
            100,
            200,
        )

        classes = fixtures.make_tags(
            # Examples
            [[[[1]]], [[[0], [1]], [[0], [1]], [[0], [1]]]]  # Frames  # Shapes
        )

        empty_attributes = tf.SparseTensor(
            indices=tf.zeros((0, 4), tf.int64),
            values=tf.constant([], dtype=tf.string),
            dense_shape=tf.constant((0, 0, 0, 0), dtype=tf.int64),
        )

        label = Polygon2DLabel(
            vertices=coordinates, classes=classes, attributes=empty_attributes
        )  # classes and attributes

        with self.cached_session():
            reshaped = label.compress_frame_dimension()
            self.assertAllEqual(reshaped.vertices.coordinates.dense_shape, [6, 2, 3, 2])
            self.assertAllEqual(reshaped.classes.dense_shape, [6, 2, 1])
            self.assertAllEqual(reshaped.attributes.dense_shape, [0, 0, 0])

    def test_slice_to_last_frame(self):
        example = Batch(
            Timestep(Frame(Poly(C(1, 1), C(2, 2))), Frame(Poly(C(3, 3), C(4, 4))))
        )
        example_classes = Batch(Timestep(Frame(Label(0)), Frame(Label(1))))
        example_v_counts = Batch(Timestep(Frame(Poly(2))))
        example_attributes = Batch(
            Timestep(Frame(Poly(Label(0))), Frame(Poly(Label(1))))
        )
        example = _get_label(
            example, example_classes, example_v_counts, example_attributes
        )

        example = example.slice_to_last_frame()

        target_example = Batch(Timestep(Frame(Poly(C(3, 3), C(4, 4)))))
        target_classes = Batch(Timestep(Frame(Label(1))))
        target_v_counts = Batch(Timestep(Frame(Poly(2))))
        target_attributes = Batch(Timestep(Frame(Poly(Label(1)))))
        target_example = _get_label(
            target_example, target_classes, target_v_counts, target_attributes
        )

        self._assertSparseEqual(
            target_example.vertices.coordinates, example.vertices.coordinates
        )
        self._assertSparseEqual(target_example.classes, example.classes)
        self._assertSparseEqual(target_example.attributes, example.attributes)

    def _assertSparseEqual(self, expected, actual):
        """Assert that two sparse tensors match.

        Args:
            expected (tf.SparseTensor): Expected tensor.
            actual (tf.SparseTensor): Actual tensor.
        """
        self.assertAllEqual(expected.indices, actual.indices)
        self.assertAllEqual(expected.dense_shape, actual.dense_shape)
        self.assertAllClose(expected.values, actual.values)
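
# ---------------------------------------------------------------------------
# Shape bookkeeping behind the assertions above (an explanatory sketch; the
# exact fixture internals live in test_fixtures): compress_frame_dimension
# folds the example and frame dimensions of every sparse field into one, e.g.
# for coordinates indexed as [example, frame, shape, vertex, coord]:
#
#     [E, F, ...]  ->  [E * F, ...]
#
# With the fixtures above (2 examples padded to 3 frames each, so E=2, F=3),
# the leading dimension of every compressed dense_shape becomes 6, as both
# tests assert.
# ---------------------------------------------------------------------------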
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/polygon2d_label_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Images2DReference is used to represent references to image assets."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types.images2d import (
    Images2D,
    LabelledImages2D,
)

_CHANNELS = 3
_DEPTH = 8
_MIN_SIDE = 0
_MAX_SIDE = 0
_AUTO_RESIZE = False
# Input tensor for image height.
H_TENSOR = None
H_TENSOR_VAL = None
# Input tensor for image width.
W_TENSOR = None
W_TENSOR_VAL = None
# Augmentations.
AUGMENTATIONS = None
AUGMENTATIONS_VAL = None

# TODO(vkallioniemi): The loading functionality here is mostly copied from modulus'
# LoadAndDecodeFrame.


def set_image_channels(channel_count):
    """Set the image channel count."""
    global _CHANNELS  # noqa pylint: disable=W0603
    _CHANNELS = channel_count


def set_min_side(min_side):
    """Set the minimal side of images."""
    global _MIN_SIDE  # noqa pylint: disable=W0603
    _MIN_SIDE = min_side


def set_max_side(max_side):
    """Set the maximal side of images."""
    global _MAX_SIDE  # noqa pylint: disable=W0603
    _MAX_SIDE = max_side


def set_auto_resize(auto_resize):
    """Set the automatic resize flag."""
    global _AUTO_RESIZE  # noqa pylint: disable=W0603
    _AUTO_RESIZE = auto_resize


def set_h_tensor(h_tensor):
    """Set the height tensor."""
    global H_TENSOR  # noqa pylint: disable=W0603
    assert (H_TENSOR is None) or (H_TENSOR is h_tensor), (
        "H_TENSOR can only be assigned once."
    )
    H_TENSOR = h_tensor


def set_h_tensor_val(h_tensor):
    """Set the height tensor for validation."""
    global H_TENSOR_VAL  # noqa pylint: disable=W0603
    assert (H_TENSOR_VAL is None) or (H_TENSOR_VAL is h_tensor), (
        "H_TENSOR_VAL can only be assigned once."
    )
    H_TENSOR_VAL = h_tensor


def set_w_tensor(w_tensor):
    """Set the width tensor."""
    global W_TENSOR  # noqa pylint: disable=W0603
    assert (W_TENSOR is None) or (W_TENSOR is w_tensor), (
        "W_TENSOR can only be assigned once."
    )
    W_TENSOR = w_tensor


def set_w_tensor_val(w_tensor):
    """Set the width tensor for validation."""
    global W_TENSOR_VAL  # noqa pylint: disable=W0603
    assert (W_TENSOR_VAL is None) or (W_TENSOR_VAL is w_tensor), (
        "W_TENSOR_VAL can only be assigned once."
    )
    W_TENSOR_VAL = w_tensor


def set_image_depth(depth):
    """Set image depth."""
    global _DEPTH  # noqa pylint: disable=W0603
    assert depth in [8, 16], (
        f"Image depth only supports 8 and 16, got {depth}"
    )
    _DEPTH = depth


def set_augmentations(augmentations):
    """Set the augmentations list."""
    global AUGMENTATIONS  # noqa pylint: disable=W0603
    assert (AUGMENTATIONS is None) or (AUGMENTATIONS is augmentations), (
        "AUGMENTATIONS can only be assigned once."
    )
    AUGMENTATIONS = augmentations


def set_augmentations_val(augmentations):
    """Set the validation augmentations list."""
    global AUGMENTATIONS_VAL  # noqa pylint: disable=W0603
    assert (AUGMENTATIONS_VAL is None) or (AUGMENTATIONS_VAL is augmentations), (
        "AUGMENTATIONS_VAL can only be assigned once."
    )
    AUGMENTATIONS_VAL = augmentations


def decode_image(data, channels, extension, depth=8):
    """Decode JPEG and PNG images.

    Args:
        data (tf.Tensor): String tensor with the raw, encoded image bytes.
        channels (int): Number of color channels to decode.
        extension (tf.Tensor): String tensor with the file extension (e.g., ".jpg").
        depth (int): Bit depth, 8 or 16. 16-bit is only supported for PNG.
    """
    is_jpeg = tf.reduce_any(
        input_tensor=[
            tf.equal(extension, ".jpg"),
            tf.equal(extension, ".jpeg"),
        ]
    )
    # Setting dct_method='INTEGER_ACCURATE' will produce the same result as
    # PIL/OpenCV.
    out_jpeg = tf.image.decode_jpeg(
        data, channels, dct_method='INTEGER_ACCURATE'
    )
    # Only PNG can support 16-bit.
    if depth == 16:
        # Make sure the 2 branches of tf.cond have the same data type.
        out_jpeg = tf.cast(out_jpeg, tf.dtypes.uint16)
        out_png = tf.image.decode_png(
            data, channels, tf.dtypes.uint16
        )
    else:
        out_png = tf.image.decode_png(
            data, channels, tf.dtypes.uint8
        )
    return tf.cond(is_jpeg, lambda: out_jpeg, lambda: out_png)


class Images2DReference(
    collections.namedtuple(
        "Images2DReference",
        ["path", "extension", "canvas_shape", "input_height", "input_width"],
    )
):
    """Reference to an image.

    Args:
        path (tf.Tensor): Tensor of type tf.string that contains the path to an image.
            The shape of this tensor can be either 1D (temporally batched) or 2D
            (temporal + mini batch).
        extension (tf.Tensor): Tensor of type tf.string that contains the file extension
            of the image referenced by `path`. The extension is used to determine the
            encoding of the image. The shape of this tensor can be either 1D (temporally
            batched) or 2D (temporal + mini batch).
        canvas_shape (Canvas2D): Structure that contains the height and width of the
            output image. The static shapes of the width and height fields are used to
            represent the width and height of the image so that the static shape of the
            loaded image can be set correctly. The shape of the tensors contained in this
            structure can be either 1D (temporally batched) or 2D (temporal + mini batch).
        input_height (tf.Tensor): 1-D or 2-D tensor for the heights of the images on
            disk. The 1-D and 2-D case respectively correspond to the absence and
            presence of temporal batching.
        input_width (tf.Tensor): 1-D or 2-D tensor for the widths of the images on disk.
            The 1-D and 2-D case respectively correspond to the absence and presence of
            temporal batching.
    """

    def load(self, output_dtype):
        """
        Load referenced images.

        Arguments:
            output_dtype (tf.dtypes.DType): Output image dtype.

        Returns:
            (tf.Tensor) Tensor representing the loaded image, of type ``output_dtype``,
            with height matching the static size of canvas_shape.height and width having
            the same size as the static size of canvas_shape.width. The shape of the
            tensor will be 4D when `paths` is a 1D tensor and 5D when `paths` is a 2D
            tensor.
        """
        # The _decode_* functions below only handle floating point types for now.
        assert output_dtype in (tf.float32, tf.float16, tf.uint8)
        # TODO (weich): The delayed dtype cast/normalization currently doesn't support
        # eager mode. For eager mode, we force the output dtype to be float and apply
        # normalization here regardless of the image type.
        if tf.executing_eagerly():
            output_dtype = tf.float32
        path_shape = self.path.shape.as_list()
        image_count = 1
        for d in path_shape:
            if d is None:
                raise ValueError(
                    """Shape of the input was not known statically, i.e. one of the
                    dimensions of the image paths tensor was 'None': {}. This can
                    happen, for example, with tf.dataset.batch(n, drop_remainder=False),
                    as then the batch dimension can be variable. Image loader requires
                    statically known batch dimensions, so set drop_remainder=True, or
                    use set_shape().""".format(
                        path_shape
                    )
                )
            image_count *= d

        # Flatten tensors so that we need only a single code path to load both 1D and
        # 2D paths.
        paths_flat = tf.reshape(self.path, (-1,))
        extension_flat = tf.reshape(self.extension, (-1,))
        input_height_flat = tf.reshape(self.input_height, (-1,))
        input_width_flat = tf.reshape(self.input_width, (-1,))
        channels = _CHANNELS
        images = []
        for index in range(image_count):
            images.append(
                self._load_and_decode(
                    path=paths_flat[index],
                    extension=extension_flat[index],
                    height=input_height_flat[index],
                    width=input_width_flat[index],
                    output_dtype=output_dtype,
                    channels=channels,
                    min_side=_MIN_SIDE,
                    max_side=_MAX_SIDE,
                )
            )
        images = tf.stack(images, 0)
        # Images are always loaded as a 4D tensor because we flatten the paths before
        # loading (to avoid two code paths.) The original paths could have been a 1D
        # tensor (temporally batched) or a 2D tensor (temporal + mini batched.) We slice
        # the spatial shape of the image from the loaded 4D tensor and concatenate it
        # with the batch and/or time dimensions of the paths to match the batch & time
        # dimensionality of the paths.
        if _MIN_SIDE == 0:
            # _MIN_SIDE == 0 is the normal case: the image loader works in static shape.
            images_shape = self.path.shape.concatenate(images.shape[1:]).as_list()
            images_shape = [-1 if dim is None else dim for dim in images_shape]
            images = tf.reshape(images, images_shape)
        return Images2D(images=images, canvas_shape=self.canvas_shape)

    def _load_and_decode(self, path, extension, height, width, output_dtype,
                         channels=3, min_side=0, max_side=0):
        if _AUTO_RESIZE:
            image = self._decode_and_resize_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension
            )
        else:
            image = self._decode_and_pad_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension
            )
        # If in min_side mode, resize and keep aspect ratio.
        if min_side > 0:
            image_hwc = tf.transpose(a=image, perm=[1, 2, 0])
            target_size = self._calculate_target_size(height, width)
            image_resized = tf.image.resize_images(image_hwc, target_size)
            image = tf.cast(tf.transpose(a=image_resized, perm=[2, 0, 1]), output_dtype)
        return image

    def _calculate_target_size(self, height, width):
        """Calculate the target size for resize, keeping aspect ratio."""
        height = tf.cast(height, tf.float32)
        width = tf.cast(width, tf.float32)
        min_side = tf.cast(tf.convert_to_tensor(_MIN_SIDE, dtype=tf.int32), tf.float32)
        # Resize the smaller side to target_size, keep aspect ratio.
        target_size = tf.cond(
            pred=tf.less_equal(height, width),
            true_fn=lambda: tf.stack([
                tf.cast(min_side, tf.int32),
                tf.cast((min_side / height) * width, tf.int32)]),
            false_fn=lambda: tf.stack([
                tf.cast((min_side / width) * height, tf.int32),
                tf.cast(min_side, tf.int32)])
        )
        # Cap the larger side so it does not exceed _MAX_SIDE.
        max_side = tf.cast(tf.convert_to_tensor(_MAX_SIDE, dtype=tf.int32), tf.float32)
        target_size2 = tf.cond(
            pred=tf.less_equal(height, width),
            true_fn=lambda: tf.stack([
                tf.cast((max_side / width) * height, tf.int32),
                tf.cast(max_side, tf.int32)]),
            false_fn=lambda: tf.stack([
                tf.cast(max_side, tf.int32),
                tf.cast((max_side / height) * width, tf.int32)])
        )
        target_size = tf.minimum(target_size, target_size2)
        return target_size

    def _decode_and_pad_image(
        self, height, width, frame_path, channels, output_dtype, extension
    ):
        """Load and decode a JPG/PNG image, and pad."""
        data = tf.io.read_file(frame_path)
        image = decode_image(data, channels, extension)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        if _MIN_SIDE == 0:
            return self._pad_image_to_right_and_bottom(image)
        return image

    def _decode_and_resize_image(
        self, height, width, frame_path, channels, output_dtype, extension
    ):
        """Decode and resize images to target size."""
        data = tf.io.read_file(frame_path)
        image = decode_image(data, channels, extension)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        if _MIN_SIDE == 0:
            return self._resize_image_to_target_size(image)
        return image

    def _decode_and_pad_png(self, height, width, frame_path, channels, output_dtype):
        """Load and decode a PNG frame, and pad."""
        data = tf.io.read_file(frame_path)
        image = tf.image.decode_image(data, channels=channels)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        return self._pad_image_to_right_and_bottom(image)

    def _pad_image_to_right_and_bottom(self, image):
        """Pad image to the canvas shape.

        NOTE: the padding, if needed, happens to the right and bottom of the input image.

        Args:
            image (tf.Tensor): Expected to be a 3-D Tensor in [C, H, W] order.

        Returns:
            padded_image (tf.Tensor): 3-D Tensor in [C, H, W] order, where H and W may
                be different from those of the input (due to padding).
        """
        height = tf.shape(input=image)[1]
        width = tf.shape(input=image)[2]
        output_height = tf.shape(input=self.canvas_shape.height)[-1]
        output_width = tf.shape(input=self.canvas_shape.width)[-1]
        pad_height = output_height - height
        pad_width = output_width - width
        padding = [
            [0, 0],  # channels.
            [0, pad_height],  # height.
            [0, pad_width],  # width.
        ]
        padded_image = tf.pad(tensor=image, paddings=padding, mode="CONSTANT")
        # This is needed because downstream processors rely on knowing the shape
        # statically.
        padded_image.set_shape(
            [_CHANNELS, self.canvas_shape.height.shape[-1], self.canvas_shape.width.shape[-1]]
        )
        return padded_image

    def _resize_image_to_target_size(self, image):
        """Resize image to the canvas shape.

        Args:
            image (tf.Tensor): Expected to be a 3-D Tensor in [C, H, W] order.

        Returns:
            image (tf.Tensor): 3-D Tensor in [C, H, W] order, where H and W may be
                different from those of the input (due to resize).
        """
        output_height = tf.shape(input=self.canvas_shape.height)[-1]
        output_width = tf.shape(input=self.canvas_shape.width)[-1]
        image_original = image
        image_hwc = tf.transpose(a=image, perm=[1, 2, 0])
        image_resized = tf.image.resize_images(image_hwc, [output_height, output_width])
        image_resized = tf.cast(tf.transpose(a=image_resized, perm=[2, 0, 1]), image.dtype)
        # If images are already in target size, do not resize. This prevents subtle
        # errors in the TensorFlow resize: resizing an image that is already at the
        # target size can still produce output different from the original image. So
        # we keep the image untouched if it is already in target size. This is
        # compatible with the old behavior of manually padding images in the dataset
        # (which also does no resize), and is a hack to ensure compatibility of mAP.
        no_resize = tf.logical_and(
            tf.equal(tf.shape(image_original)[1], output_height),
            tf.equal(tf.shape(image_original)[2], output_width)
        )
        image = tf.cond(
            no_resize,
            true_fn=lambda: image_original,
            false_fn=lambda: image_resized
        )
        image.set_shape(
            [_CHANNELS, self.canvas_shape.height.shape[-1], self.canvas_shape.width.shape[-1]]
        )
        return image


class LabelledImages2DReference(
    collections.namedtuple(
        "LabelledImages2DReference",
        [
            "path", "extension", "canvas_shape",
            "input_height", "input_width", "labels"
        ],
    )
):
    """Reference to an image with labels.

    Args:
        path (tf.Tensor): Tensor of type tf.string that contains the path to an image.
            The shape of this tensor can be either 1D (temporally batched) or 2D
            (temporal + mini batch).
        extension (tf.Tensor): Tensor of type tf.string that contains the file extension
            of the image referenced by `path`. The extension is used to determine the
            encoding of the image. The shape of this tensor can be either 1D (temporally
            batched) or 2D (temporal + mini batch).
        canvas_shape (Canvas2D): Structure that contains the height and width of the
            output image. The static shapes of the width and height fields are used to
            represent the width and height of the image so that the static shape of the
            loaded image can be set correctly. The shape of the tensors contained in this
            structure can be either 1D (temporally batched) or 2D (temporal + mini batch).
        input_height (tf.Tensor): 1-D or 2-D tensor for the heights of the images on
            disk. The 1-D and 2-D case respectively correspond to the absence and
            presence of temporal batching.
        input_width (tf.Tensor): 1-D or 2-D tensor for the widths of the images on disk.
            The 1-D and 2-D case respectively correspond to the absence and presence of
            temporal batching.
        labels: Object labels (with sparse ``vertices`` coordinates) associated with
            the images.
    """

    def load(self, output_dtype):
        """
        Load referenced images.

        Arguments:
            output_dtype (tf.dtypes.DType): Output image dtype.

        Returns:
            (LabelledImages2D) Structure holding the loaded image tensor (forced to
            tf.float32 here), with height matching the static size of
            canvas_shape.height and width having the same size as the static size of
            canvas_shape.width, together with the (possibly augmented) labels and the
            pre-padding image shapes.
        """
        # The _decode_* functions below only handle floating point types for now.
        assert output_dtype in (tf.float32, tf.float16, tf.uint8)
        output_dtype = tf.float32
        path_shape = self.path.shape.as_list()
        image_count = 1
        for d in path_shape:
            if d is None:
                raise ValueError(
                    """Shape of the input was not known statically, i.e. one of the
                    dimensions of the image paths tensor was 'None': {}. This can
                    happen, for example, with tf.dataset.batch(n, drop_remainder=False),
                    as then the batch dimension can be variable. Image loader requires
                    statically known batch dimensions, so set drop_remainder=True, or
                    use set_shape().""".format(
                        path_shape
                    )
                )
            image_count *= d

        # Flatten tensors so that we need only a single code path to load both 1D and
        # 2D paths.
        paths_flat = tf.reshape(self.path, (-1,))
        extension_flat = tf.reshape(self.extension, (-1,))
        channels = _CHANNELS
        depth = _DEPTH
        images = []
        assert image_count == 1
        for index in range(image_count):
            img = self._decode_image(
                paths_flat[index],
                channels,
                depth,
                output_dtype,
                extension_flat[index]
            )
            # Convert grayscale to RGB.
            if channels == 1:
                img = tf.image.grayscale_to_rgb(img)
            sparse_boxes = tf.sparse.reorder(self.labels.vertices.coordinates)
            gt_labels = tf.reshape(sparse_boxes.values, [-1, 4])
            image_width = tf.cast(tf.shape(img)[1], tf.float32)
            if AUGMENTATIONS is not None:
                # Ratio is only valid for single-scale training.
                ratio = W_TENSOR / H_TENSOR
                img, gt_labels = AUGMENTATIONS(img, gt_labels, ratio, image_width)
            new_labels = self._update_augmented_labels(gt_labels, sparse_boxes)
            img = tf.expand_dims(img, axis=0)
            img.set_shape([1, None, None, 3])
            images.append(img)
        shapes = tf.reshape(tf.shape(images[0])[1:3], (1, 2))
        images = tf.image.pad_to_bounding_box(
            images[0],
            0,
            0,
            tf.shape(self.canvas_shape.height)[-1],
            tf.shape(self.canvas_shape.width)[-1]
        )
        return LabelledImages2D(images=images, labels=new_labels, shapes=shapes)

    def _get_dense_bboxes(self):
        coords = tf.sparse.reshape(self.labels.vertices.coordinates, [-1, 4])
        bboxes = tf.reshape(
            tf.sparse.to_dense(coords),
            (-1, 4)
        )
        h = tf.cast(tf.reshape(self.input_height, []), tf.float32)
        w = tf.cast(tf.reshape(self.input_width, []), tf.float32)
        # Normalized coordinates for augmentations.
        bboxes /= tf.stack([w, h, w, h], axis=-1)
        return bboxes

    def _update_augmented_labels(self, bboxes, sparse_bboxes):
        sparsed = tf.sparse.SparseTensor(
            indices=sparse_bboxes.indices,
            values=tf.reshape(bboxes, [-1]),
            dense_shape=sparse_bboxes.dense_shape
        )
        new_vertices = self.labels.vertices._replace(coordinates=sparsed)
        new_labels = self.labels._replace(vertices=new_vertices)
        return new_labels

    def _load_and_decode(self, path, extension, height, width, output_dtype,
                         channels=3, min_side=0, max_side=0,
                         h_tensor=None, w_tensor=None):
        if _AUTO_RESIZE:
            image = self._decode_and_resize_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension
            )
        elif (h_tensor is None) and (w_tensor is None):
            image = self._decode_and_pad_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension
            )
        else:
            # Dynamic shape with h_tensor and w_tensor.
            image = self._decode_and_resize_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension,
                h_tensor=h_tensor,
                w_tensor=w_tensor
            )
        # If in min_side mode, resize and keep aspect ratio.
        if min_side > 0:
            image_hwc = tf.transpose(a=image, perm=[1, 2, 0])
            target_size = self._calculate_target_size(height, width)
            image_resized = tf.image.resize_images(image_hwc, target_size)
            image = tf.cast(tf.transpose(a=image_resized, perm=[2, 0, 1]), output_dtype)
        return image

    def _calculate_target_size(self, height, width):
        """Calculate the target size for resize, keeping aspect ratio."""
        height = tf.cast(height, tf.float32)
        width = tf.cast(width, tf.float32)
        min_side = tf.cast(tf.convert_to_tensor(_MIN_SIDE, dtype=tf.int32), tf.float32)
        # Resize the smaller side to target_size, keep aspect ratio.
        target_size = tf.cond(
            pred=tf.less_equal(height, width),
            true_fn=lambda: tf.stack([
                tf.cast(min_side, tf.int32),
                tf.cast((min_side / height) * width, tf.int32)]),
            false_fn=lambda: tf.stack([
                tf.cast((min_side / width) * height, tf.int32),
                tf.cast(min_side, tf.int32)])
        )
        # Cap the larger side so it does not exceed _MAX_SIDE.
        max_side = tf.cast(tf.convert_to_tensor(_MAX_SIDE, dtype=tf.int32), tf.float32)
        target_size2 = tf.cond(
            pred=tf.less_equal(height, width),
            true_fn=lambda: tf.stack([
                tf.cast((max_side / width) * height, tf.int32),
                tf.cast(max_side, tf.int32)]),
            false_fn=lambda: tf.stack([
                tf.cast(max_side, tf.int32),
                tf.cast((max_side / height) * width, tf.int32)])
        )
        target_size = tf.minimum(target_size, target_size2)
        return target_size

    def _decode_and_pad_image(
        self, height, width, frame_path, channels, output_dtype, extension
    ):
        """Load and decode a JPG/PNG image, and pad."""
        data = tf.io.read_file(frame_path)
        image = decode_image(data, channels, extension)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        if _MIN_SIDE == 0:
            return self._pad_image_to_right_and_bottom(image)
        return image

    def _decode_and_resize_image(
        self, height, width, frame_path, channels, output_dtype, extension,
        h_tensor=None, w_tensor=None
    ):
        """Decode and resize images to target size."""
        data = tf.io.read_file(frame_path)
        image = decode_image(data, channels, extension)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        if _MIN_SIDE == 0:
            return self._resize_image_to_target_size(
                image,
                h_tensor=h_tensor,
                w_tensor=w_tensor
            )
        return image

    def _decode_image(
        self, frame_path, channels, depth, output_dtype, extension
    ):
        """Decode a single image."""
        data = tf.io.read_file(frame_path)
        image = tf.cast(decode_image(data, channels, extension, depth=depth), output_dtype)
        return image

    def resize_image(
        self, image, h_tensor, w_tensor
    ):
        """Resize image to target size."""
        image_original = image
        image_resized = tf.image.resize_images(image, tf.stack([h_tensor, w_tensor], axis=0))
        # If images are already in target size, do not resize. This prevents subtle
        # errors in the TensorFlow resize: resizing an image that is already at the
        # target size can still produce output different from the original image. So
        # we keep the image untouched if it is already in target size. This is
        # compatible with the old behavior of manually padding images in the dataset
        # (which also does no resize), and is a hack to ensure compatibility of mAP.
        no_resize = tf.logical_and(
            tf.equal(tf.shape(image_original)[0], h_tensor),
            tf.equal(tf.shape(image_original)[1], w_tensor)
        )
        image = tf.cond(
            no_resize,
            true_fn=lambda: image_original,
            false_fn=lambda: image_resized
        )
        # HWC to CHW.
        image = tf.transpose(image, (2, 0, 1))
        return image

    def _decode_and_pad_png(self, height, width, frame_path, channels, output_dtype):
        """Load and decode a PNG frame, and pad."""
        data = tf.io.read_file(frame_path)
        image = tf.image.decode_image(data, channels=channels)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        return self._pad_image_to_right_and_bottom(image)

    def _pad_image_to_right_and_bottom(self, image):
        """Pad image to the canvas shape.

        NOTE: the padding, if needed, happens to the right and bottom of the input image.

        Args:
            image (tf.Tensor): Expected to be a 3-D Tensor in [C, H, W] order.

        Returns:
            padded_image (tf.Tensor): 3-D Tensor in [C, H, W] order, where H and W may
                be different from those of the input (due to padding).
        """
        height = tf.shape(input=image)[1]
        width = tf.shape(input=image)[2]
        output_height = tf.shape(input=self.canvas_shape.height)[-1]
        output_width = tf.shape(input=self.canvas_shape.width)[-1]
        pad_height = output_height - height
        pad_width = output_width - width
        padding = [
            [0, 0],  # channels.
            [0, pad_height],  # height.
            [0, pad_width],  # width.
        ]
        padded_image = tf.pad(tensor=image, paddings=padding, mode="CONSTANT")
        # This is needed because downstream processors rely on knowing the shape
        # statically.
        padded_image.set_shape(
            [_CHANNELS, self.canvas_shape.height.shape[-1], self.canvas_shape.width.shape[-1]]
        )
        return padded_image

    def _resize_image_to_target_size(self, image, h_tensor=None, w_tensor=None):
        """Resize image to the canvas shape.

        Args:
            image (tf.Tensor): Expected to be a 3-D Tensor in [C, H, W] order.

        Returns:
            image (tf.Tensor): 3-D Tensor in [C, H, W] order, where H and W may be
                different from those of the input (due to resize).
        """
        if h_tensor is None:
            output_height = tf.shape(input=self.canvas_shape.height)[-1]
        else:
            output_height = h_tensor
        if w_tensor is None:
            output_width = tf.shape(input=self.canvas_shape.width)[-1]
        else:
            output_width = w_tensor
        image_original = image
        image_hwc = tf.transpose(a=image, perm=[1, 2, 0])
        image_resized = tf.image.resize_images(image_hwc, [output_height, output_width])
        image_resized = tf.cast(tf.transpose(a=image_resized, perm=[2, 0, 1]), image.dtype)
        # If images are already in target size, do not resize. This prevents subtle
        # errors in the TensorFlow resize: resizing an image that is already at the
        # target size can still produce output different from the original image. So
        # we keep the image untouched if it is already in target size. This is
        # compatible with the old behavior of manually padding images in the dataset
        # (which also does no resize), and is a hack to ensure compatibility of mAP.
        no_resize = tf.logical_and(
            tf.equal(tf.shape(image_original)[1], output_height),
            tf.equal(tf.shape(image_original)[2], output_width)
        )
        image = tf.cond(
            no_resize,
            true_fn=lambda: image_original,
            false_fn=lambda: image_resized
        )
        if (h_tensor is None) and (w_tensor is None):
            image.set_shape(
                [_CHANNELS, self.canvas_shape.height.shape[-1], self.canvas_shape.width.shape[-1]]
            )
        return image


class LabelledImages2DReferenceVal(
    collections.namedtuple(
        "LabelledImages2DReferenceVal",
        [
            "path", "extension", "canvas_shape",
            "input_height", "input_width", "labels"
        ],
    )
):
    """Reference to an image with labels, for validation.

    Args:
        path (tf.Tensor): Tensor of type tf.string that contains the path to an image.
            The shape of this tensor can be either 1D (temporally batched) or 2D
            (temporal + mini batch).
        extension (tf.Tensor): Tensor of type tf.string that contains the file extension
            of the image referenced by `path`. The extension is used to determine the
            encoding of the image. The shape of this tensor can be either 1D (temporally
            batched) or 2D (temporal + mini batch).
        canvas_shape (Canvas2D): Structure that contains the height and width of the
            output image. The static shapes of the width and height fields are used to
            represent the width and height of the image so that the static shape of the
            loaded image can be set correctly. The shape of the tensors contained in this
            structure can be either 1D (temporally batched) or 2D (temporal + mini batch).
        input_height (tf.Tensor): 1-D or 2-D tensor for the heights of the images on
            disk. The 1-D and 2-D case respectively correspond to the absence and
            presence of temporal batching.
        input_width (tf.Tensor): 1-D or 2-D tensor for the widths of the images on disk.
            The 1-D and 2-D case respectively correspond to the absence and presence of
            temporal batching.
        labels: Object labels (with sparse ``vertices`` coordinates) associated with
            the images.
    """

    def load(self, output_dtype):
        """
        Load referenced images.

        Arguments:
            output_dtype (tf.dtypes.DType): Output image dtype.

        Returns:
            (LabelledImages2D) Structure holding the loaded image tensor (forced to
            tf.float32 here), together with the (possibly augmented) labels and the
            image shapes.
        """
        # The _decode_* functions below only handle floating point types for now.
        assert output_dtype in (tf.float32, tf.float16, tf.uint8)
        output_dtype = tf.float32
        path_shape = self.path.shape.as_list()
        image_count = 1
        for d in path_shape:
            if d is None:
                raise ValueError(
                    """Shape of the input was not known statically, i.e. one of the
                    dimensions of the image paths tensor was 'None': {}. This can
                    happen, for example, with tf.dataset.batch(n, drop_remainder=False),
                    as then the batch dimension can be variable. Image loader requires
                    statically known batch dimensions, so set drop_remainder=True, or
                    use set_shape().""".format(
                        path_shape
                    )
                )
            image_count *= d

        # Flatten tensors so that we need only a single code path to load both 1D and
        # 2D paths.
        paths_flat = tf.reshape(self.path, (-1,))
        extension_flat = tf.reshape(self.extension, (-1,))
        input_height_flat = tf.reshape(self.input_height, (-1,))
        input_width_flat = tf.reshape(self.input_width, (-1,))
        bbox_normalizer = tf.concat(
            [
                input_width_flat,
                input_height_flat,
                input_width_flat,
                input_height_flat
            ],
            axis=-1
        )
        channels = _CHANNELS
        depth = _DEPTH
        images = []
        assert image_count == 1
        for index in range(image_count):
            img = self._decode_image(
                paths_flat[index],
                channels,
                depth,
                output_dtype,
                extension_flat[index]
            )
            # Convert grayscale to RGB.
            if channels == 1:
                img = tf.image.grayscale_to_rgb(img)
            sparse_boxes = tf.sparse.reorder(self.labels.vertices.coordinates)
            gt_labels = tf.reshape(sparse_boxes.values, [-1, 4])
            gt_labels /= tf.cast(bbox_normalizer, tf.float32)
            if AUGMENTATIONS_VAL is not None:
                # Inference/evaluation.
                target_shape = tf.stack([W_TENSOR_VAL, H_TENSOR_VAL], axis=0)
                img, gt_labels = AUGMENTATIONS_VAL(img, gt_labels, target_shape)
                gt_labels *= tf.cast(
                    tf.concat([target_shape, target_shape], axis=-1), tf.float32
                )
            new_labels = self._update_augmented_labels(gt_labels, sparse_boxes)
            img = tf.expand_dims(img, axis=0)
            img.set_shape([1, None, None, 3])
            images.append(img)
        shapes = tf.reshape(tf.shape(images[0])[1:3], (1, 2))
        return LabelledImages2D(images=images[0], labels=new_labels, shapes=shapes)

    def _get_dense_bboxes(self):
        coords = tf.sparse.reshape(self.labels.vertices.coordinates, [-1, 4])
        bboxes = tf.reshape(
            tf.sparse.to_dense(coords),
            (-1, 4)
        )
        h = tf.cast(tf.reshape(self.input_height, []), tf.float32)
        w = tf.cast(tf.reshape(self.input_width, []), tf.float32)
        # Normalized coordinates for augmentations.
        bboxes /= tf.stack([w, h, w, h], axis=-1)
        return bboxes

    def _update_augmented_labels(self, bboxes, sparse_bboxes):
        sparsed = tf.sparse.SparseTensor(
            indices=sparse_bboxes.indices,
            values=tf.reshape(bboxes, [-1]),
            dense_shape=sparse_bboxes.dense_shape
        )
        new_vertices = self.labels.vertices._replace(coordinates=sparsed)
        new_labels = self.labels._replace(vertices=new_vertices)
        return new_labels

    def _load_and_decode(self, path, extension, height, width, output_dtype,
                         channels=3, min_side=0, max_side=0,
                         h_tensor=None, w_tensor=None):
        if _AUTO_RESIZE:
            image = self._decode_and_resize_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension
            )
        elif (h_tensor is None) and (w_tensor is None):
            image = self._decode_and_pad_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension
            )
        else:
            # Dynamic shape with h_tensor and w_tensor.
            image = self._decode_and_resize_image(
                height=height,
                width=width,
                frame_path=path,
                channels=channels,
                output_dtype=output_dtype,
                extension=extension,
                h_tensor=h_tensor,
                w_tensor=w_tensor
            )
        # If in min_side mode, resize and keep aspect ratio.
        if min_side > 0:
            image_hwc = tf.transpose(a=image, perm=[1, 2, 0])
            target_size = self._calculate_target_size(height, width)
            image_resized = tf.image.resize_images(image_hwc, target_size)
            image = tf.cast(tf.transpose(a=image_resized, perm=[2, 0, 1]), output_dtype)
        return image

    def _calculate_target_size(self, height, width):
        """Calculate the target size for resize, keeping aspect ratio."""
        height = tf.cast(height, tf.float32)
        width = tf.cast(width, tf.float32)
        min_side = tf.cast(tf.convert_to_tensor(_MIN_SIDE, dtype=tf.int32), tf.float32)
        # Resize the smaller side to target_size, keep aspect ratio.
        target_size = tf.cond(
            pred=tf.less_equal(height, width),
            true_fn=lambda: tf.stack([
                tf.cast(min_side, tf.int32),
                tf.cast((min_side / height) * width, tf.int32)]),
            false_fn=lambda: tf.stack([
                tf.cast((min_side / width) * height, tf.int32),
                tf.cast(min_side, tf.int32)])
        )
        # Cap the larger side so it does not exceed _MAX_SIDE.
        max_side = tf.cast(tf.convert_to_tensor(_MAX_SIDE, dtype=tf.int32), tf.float32)
        target_size2 = tf.cond(
            pred=tf.less_equal(height, width),
            true_fn=lambda: tf.stack([
                tf.cast((max_side / width) * height, tf.int32),
                tf.cast(max_side, tf.int32)]),
            false_fn=lambda: tf.stack([
                tf.cast(max_side, tf.int32),
                tf.cast((max_side / height) * width, tf.int32)])
        )
        target_size = tf.minimum(target_size, target_size2)
        return target_size

    def _decode_and_pad_image(
        self, height, width, frame_path, channels, output_dtype, extension
    ):
        """Load and decode a JPG/PNG image, and pad."""
        data = tf.io.read_file(frame_path)
        image = decode_image(data, channels, extension)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        if _MIN_SIDE == 0:
            return self._pad_image_to_right_and_bottom(image)
        return image

    def _decode_and_resize_image(
        self, height, width, frame_path, channels, output_dtype, extension,
        h_tensor=None, w_tensor=None
    ):
        """Decode and resize images to target size."""
        data = tf.io.read_file(frame_path)
        image = decode_image(data, channels, extension)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        if _MIN_SIDE == 0:
            return self._resize_image_to_target_size(
                image,
                h_tensor=h_tensor,
                w_tensor=w_tensor
            )
        return image

    def _decode_image(
        self, frame_path, channels, depth, output_dtype, extension
    ):
        """Decode a single image."""
        data = tf.io.read_file(frame_path)
        image = tf.cast(decode_image(data, channels, extension, depth=depth), output_dtype)
        return image

    def resize_image(
        self, image, h_tensor, w_tensor
    ):
        """Resize image to target size."""
        image_original = image
        image_resized = tf.image.resize_images(image, tf.stack([h_tensor, w_tensor], axis=0))
        # If images are already in target size, do not resize. This prevents subtle
        # errors in the TensorFlow resize: resizing an image that is already at the
        # target size can still produce output different from the original image. So
        # we keep the image untouched if it is already in target size. This is
        # compatible with the old behavior of manually padding images in the dataset
        # (which also does no resize), and is a hack to ensure compatibility of mAP.
        no_resize = tf.logical_and(
            tf.equal(tf.shape(image_original)[0], h_tensor),
            tf.equal(tf.shape(image_original)[1], w_tensor)
        )
        image = tf.cond(
            no_resize,
            true_fn=lambda: image_original,
            false_fn=lambda: image_resized
        )
        # HWC to CHW.
        image = tf.transpose(image, (2, 0, 1))
        return image

    def _decode_and_pad_png(self, height, width, frame_path, channels, output_dtype):
        """Load and decode a PNG frame, and pad."""
        data = tf.io.read_file(frame_path)
        image = tf.image.decode_image(data, channels=channels)
        image = tf.transpose(a=image, perm=[2, 0, 1])
        if output_dtype == tf.uint8:
            # For JPG and PNG with dtype uint8 we delay normalization to later in the
            # pipeline.
            image = tf.cast(image, output_dtype)
        else:
            image = tf.cast(image, output_dtype) / 255
        return self._pad_image_to_right_and_bottom(image)

    def _pad_image_to_right_and_bottom(self, image):
        """Pad image to the canvas shape.

        NOTE: the padding, if needed, happens to the right and bottom of the input image.

        Args:
            image (tf.Tensor): Expected to be a 3-D Tensor in [C, H, W] order.

        Returns:
            padded_image (tf.Tensor): 3-D Tensor in [C, H, W] order, where H and W may
                be different from those of the input (due to padding).
        """
        height = tf.shape(input=image)[1]
        width = tf.shape(input=image)[2]
        output_height = tf.shape(input=self.canvas_shape.height)[-1]
        output_width = tf.shape(input=self.canvas_shape.width)[-1]
        pad_height = output_height - height
        pad_width = output_width - width
        padding = [
            [0, 0],  # channels.
            [0, pad_height],  # height.
            [0, pad_width],  # width.
        ]
        padded_image = tf.pad(tensor=image, paddings=padding, mode="CONSTANT")
        # This is needed because downstream processors rely on knowing the shape
        # statically.
        padded_image.set_shape(
            [_CHANNELS, self.canvas_shape.height.shape[-1], self.canvas_shape.width.shape[-1]]
        )
        return padded_image

    def _resize_image_to_target_size(self, image, h_tensor=None, w_tensor=None):
        """Resize image to the canvas shape.

        Args:
            image (tf.Tensor): Expected to be a 3-D Tensor in [C, H, W] order.

        Returns:
            image (tf.Tensor): 3-D Tensor in [C, H, W] order, where H and W may be
                different from those of the input (due to resize).
        """
        if h_tensor is None:
            output_height = tf.shape(input=self.canvas_shape.height)[-1]
        else:
            output_height = h_tensor
        if w_tensor is None:
            output_width = tf.shape(input=self.canvas_shape.width)[-1]
        else:
            output_width = w_tensor
        image_original = image
        image_hwc = tf.transpose(a=image, perm=[1, 2, 0])
        image_resized = tf.image.resize_images(image_hwc, [output_height, output_width])
        image_resized = tf.cast(tf.transpose(a=image_resized, perm=[2, 0, 1]), image.dtype)
        # If images are already in target size, do not resize. This prevents subtle
        # errors in the TensorFlow resize: resizing an image that is already at the
        # target size can still produce output different from the original image. So
        # we keep the image untouched if it is already in target size. This is
        # compatible with the old behavior of manually padding images in the dataset
        # (which also does no resize), and is a hack to ensure compatibility of mAP.
        no_resize = tf.logical_and(
            tf.equal(tf.shape(image_original)[1], output_height),
            tf.equal(tf.shape(image_original)[2], output_width)
        )
        image = tf.cond(
            no_resize,
            true_fn=lambda: image_original,
            false_fn=lambda: image_resized
        )
        if (h_tensor is None) and (w_tensor is None):
            image.set_shape(
                [_CHANNELS, self.canvas_shape.height.shape[-1], self.canvas_shape.width.shape[-1]]
            )
        return image
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/images2d_reference.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Functions to process front / back markers."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import tensorflow as tf

REFERENCE_ANGLE = 0.0
FRONT_BACK_TOLERANCE = 5.0 * math.pi / 180.0
SIDE_ONLY_TOLERANCE = 2.0 * math.pi / 180.0


def _round_marker_tf(markers, tolerance=0.01):
    """Round markers to account for potential labeling errors.

    Args:
        markers (tf.Tensor): Either front or back marker values.
        tolerance (float): The tolerance within which we start rounding values.

    Returns:
        rounded_markers (tf.Tensor): <markers> that have been rounded.
    """
    # First, round values close to 0.0.
    rounded_markers = tf.compat.v1.where(
        tf.logical_and(tf.less(markers, tolerance), tf.greater_equal(markers, 0.0)),
        tf.zeros_like(markers),  # If condition is True.
        markers,
    )
    # Then, round values close to 1.0.
    rounded_markers = tf.compat.v1.where(
        tf.greater(rounded_markers, 1.0 - tolerance),
        tf.ones_like(rounded_markers),
        rounded_markers,
    )
    return rounded_markers


def _minus_pi_plus_pi(orientations):
    """Put orientation values in [-pi; pi[ range.

    Args:
        orientations (tf.Tensor): Contains values for orientation in radians.
            Shape is (N,).

    Returns:
        new_orientations (tf.Tensor): Same values as <orientations> but in the
            [-pi; pi[ range.
    """
    new_orientations = tf.math.floormod(orientations, 2.0 * math.pi)
    new_orientations = tf.compat.v1.where(
        new_orientations > math.pi, new_orientations - 2.0 * math.pi, new_orientations
    )
    return new_orientations


def map_markers_to_orientations(
    front_markers,
    back_markers,
    invalid_orientation,
    ref_angle=REFERENCE_ANGLE,
    tolerance=0.01,
    clockwise=False,
):
    """Map front / back markers to orientation values.

    An angle of 0.0 corresponds to the scenario where an object is in the same direction
    as the ego camera, its back facing towards the camera. Outputs radian values in the
    [-pi; pi[ range.

    Args:
        front_markers (tf.Tensor): Denotes the front marker of target objects. Shape is
            (N,), where N is the number of targets in a frame.
        back_markers (tf.Tensor): Likewise, but for the back marker.
        invalid_orientation (float): Value used to populate entries corresponding to
            bogus (<front_markers>, <back_markers>) combos.
        ref_angle (float): Reference angle corresponding to the scenario where a vehicle
            is right in front of the camera with its back facing towards the camera.
            Defaults to ``REFERENCE_ANGLE``.
        tolerance (float): Tolerance within which marker values are rounded to exact
            0.0 / 1.0 values (see ``_round_marker_tf``). Must be in ]0.; 1.[.
        clockwise (bool): Whether to count clockwise angles as positive values. False
            would correspond to trigonometric convention.

    Returns:
        orientations (tf.Tensor): Shape (N,) tensor containing the angle corresponding
            to (<front_markers>, <back_markers>). Values are radians.

    Raises:
        ValueError: If parameters are outside accepted ranges.
    """
    if not (0.0 < tolerance < 1.0):
        raise ValueError(
            "map_markers_to_orientations accepts a tolerance in ]0.; 1.[ range only."
        )
    if not (-math.pi <= ref_angle < math.pi):
        raise ValueError(
            "map_markers_to_orientations accepts a ref_angle in [-pi; pi[ range only."
        )
    # First, round the markers.
    rounded_front_markers = _round_marker_tf(front_markers, tolerance)
    rounded_back_markers = _round_marker_tf(back_markers, tolerance)
    ones = tf.ones_like(rounded_front_markers)  # Used for constants and whatnot.
    orientations = tf.zeros_like(front_markers)
    # Back only.
    is_back_only = tf.logical_and(
        tf.equal(rounded_front_markers, -1.0),
        tf.logical_or(
            tf.equal(rounded_back_markers, 0.0), tf.equal(rounded_back_markers, 1.0)
        ),
    )
    orientations = tf.compat.v1.where(
        is_back_only, tf.zeros_like(rounded_front_markers), orientations
    )
    # Front only.
    is_front_only = tf.logical_and(
        tf.equal(rounded_back_markers, -1.0),
        tf.logical_or(
            tf.equal(rounded_front_markers, 0.0), tf.equal(rounded_front_markers, 1.0)
        ),
    )
    orientations = tf.compat.v1.where(is_front_only, math.pi * ones, orientations)
    # Front and right.
    is_front_and_right = tf.logical_and(
        tf.logical_and(
            tf.greater(rounded_front_markers, 0.0), tf.less(rounded_front_markers, 1.0)
        ),
        tf.equal(rounded_back_markers, 0.0),
    )
    orientations = tf.compat.v1.where(
        is_front_and_right,
        -(math.pi / 2.0) * (2.0 * ones - rounded_front_markers),
        orientations,
    )
    # Front and left.
    is_front_and_left = tf.logical_and(
        tf.logical_and(
            tf.greater(rounded_front_markers, 0.0), tf.less(rounded_front_markers, 1.0)
        ),
        tf.equal(rounded_back_markers, 1.0),
    )
    orientations = tf.compat.v1.where(
        is_front_and_left,
        (math.pi / 2.0) * (ones + rounded_front_markers),
        orientations,
    )
    # Back + right or left.
    is_back_and_side = tf.logical_and(
        tf.logical_or(
            tf.equal(rounded_front_markers, 0.0),  # Left side.
            tf.equal(rounded_front_markers, 1.0),  # Right side.
        ),
        tf.logical_and(
            tf.greater(rounded_back_markers, 0.0), tf.less(rounded_back_markers, 1.0)
        ),
    )
    orientations = tf.compat.v1.where(
        is_back_and_side,
        (math.pi / 2.0) * (rounded_back_markers - rounded_front_markers),
        orientations,
    )
    # Finally, only one of the sides is visible (when either (0.0, 1.0) or (1.0, 0.0)).
    is_side_only = tf.logical_or(
        tf.logical_and(
            tf.equal(rounded_front_markers, 0.0), tf.equal(rounded_back_markers, 1.0)
        ),
        tf.logical_and(
            tf.equal(rounded_front_markers, 1.0), tf.equal(rounded_back_markers, 0.0)
        ),
    )
    orientations = tf.compat.v1.where(
        is_side_only,
        (math.pi / 2.0) * (rounded_back_markers - rounded_front_markers),
        orientations,
    )
    # Shift and scale.
    if clockwise:
        orientations = -orientations
    orientations = orientations + ref_angle
    # Keep things in [-pi; pi[ range.
    orientations = _minus_pi_plus_pi(orientations)
    # Finally, if none of the cases had hit, set the entries to <invalid_orientation>.
    all_scenarios = tf.stack(
        [
            is_back_only,
            is_front_only,
            is_front_and_right,
            is_front_and_left,
            is_back_and_side,
            is_side_only,
        ]
    )
    is_any_scenario = tf.reduce_any(input_tensor=all_scenarios, axis=0)
    orientations = tf.compat.v1.where(
        is_any_scenario, orientations, invalid_orientation * ones  # Keep as is.
    )
    return orientations


def _round_marker(marker, epsilon=0.05):
    """Helper function to round a marker value to either 0.0 or 1.0.

    Args:
        marker (float): Marker value. Expected to be in [0.0, 1.0] range.
        epsilon (float): Value within which to round.

    Returns:
        rounded_marker (float): <marker> rounded to either 0.0 or 1.0 if it is within
            epsilon of one or the other.
    """
    rounded_marker = marker
    if abs(marker) < epsilon:
        rounded_marker = 0.0
    elif abs(marker - 1.0) < epsilon:
        rounded_marker = 1.0
    return rounded_marker


def map_orientation_to_markers(
    orientation,
    ref_angle=REFERENCE_ANGLE,
    clockwise=False,
    front_back_tolerance=FRONT_BACK_TOLERANCE,
    side_only_tolerance=SIDE_ONLY_TOLERANCE,
):
    """Map orientation value to (front, back) marker values.

    Args:
        orientation (float): Orientation value in radians. Values are expected to be
            in [-pi; pi[.
        ref_angle (float): Reference angle corresponding to the scenario where a vehicle
            is right in front of the camera with its back facing towards the camera.
            Defaults to ``REFERENCE_ANGLE``.
        clockwise (bool): Whether to count clockwise angles as positive values. False
            would correspond to trigonometric convention.
        front_back_tolerance (float): Radian tolerance within which we consider
            <orientation> to be equal to that of a front- / back-only scenario.
        side_only_tolerance (float): Likewise, but for either of the side-only
            scenarios.

    Returns:
        front (float): Corresponding front marker value.
        back (float): Idem, but for back marker value.

    Raises:
        ValueError: If ref_angle is outside the accepted range.
    """
    if not (-math.pi <= ref_angle < math.pi):
        raise ValueError(
            "map_orientation_to_markers accepts a ref_angle in [-pi; pi[ range only."
        )
    # Adjust orientation coordinate system if need be.
    _orientation = orientation - ref_angle
    if clockwise:
        _orientation *= -1.0
    # Put in [-pi, pi[ range.
    _orientation = _orientation % (2.0 * math.pi)
    _orientation = (
        _orientation - 2.0 * math.pi if _orientation > math.pi else _orientation
    )

    front = 0.0
    back = 0.0
    radian_factor = 2.0 / math.pi
    # For the following scenarios, we allow a certain tolerance on the orientation value:
    #  - front or back only: if within <front_back_tolerance> of the exact value.
    #  - side only: if within <side_only_tolerance> of the exact value.
    # As such, their corresponding checks appear first in the following if / elif clause.
    if abs(_orientation) < front_back_tolerance:
        # Back only.
        front = -1.0
        back = 0.0
    elif (
        abs(_orientation - math.pi) < front_back_tolerance
        or abs(_orientation + math.pi) < front_back_tolerance
    ):
        # Front only.
        front = 0.0
        back = -1.0
    elif abs(_orientation - math.pi / 2.0) < side_only_tolerance:
        # Left only.
        front = 0.0
        back = 1.0
    elif abs(_orientation + math.pi / 2.0) < side_only_tolerance:
        # Right only.
        front = 1.0
        back = 0.0
    elif -math.pi / 2.0 < _orientation <= 0.0:
        # ]-pi/2; 0] - back + right.
        front = 1.0
        back = radian_factor * _orientation + 1.0
    elif -math.pi < _orientation <= -math.pi / 2.0:
        # ]-pi; -pi/2] - front + right.
        front = radian_factor * _orientation + 2.0
        back = 0.0
    elif 0.0 < _orientation <= math.pi / 2.0:
        # ]0; pi/2] - back + left.
        front = 0.0
        back = radian_factor * _orientation
    elif math.pi / 2.0 < _orientation <= math.pi:
        # ]pi/2; pi] - front + left.
        front = radian_factor * _orientation - 1
        back = 1.0
    # Additional rounding. This is to be able to hard classify certain examples as
    # side only, etc.
    front = _round_marker(front)
    back = _round_marker(back)

    return front, back
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/process_markers.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Convenience tool for building sparse tensors from Python nested lists."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf


class SparseTensorBuilder(object):
    """Convenience object for building sparse tensors.

    This class enables arbitrary nesting, and the ability to convert to a
    tf.SparseTensor. In this test harness, it's used to build out coordinate tensors.

    Nesting is achieved by passing instances of `SparseTensorBuilder` as the `things`
    argument. This recursion is terminated when non-SparseTensorBuilder instances are
    passed in as things. Mixing and matching is *not* allowed, although it is valid to
    pass nothing in, which allows representing an item with no values.

    Example:
        # Used for brevity.
        class Ex(SparseTensorBuilder):
            pass

        > builder = Ex(
            Ex(
                Ex(0, 1),
                Ex(2, 3)
            ),
            Ex(
                Ex(4, 5),
                Ex(6, 7),
                Ex(8, 9)
            )
        )
        > sparse_ex = builder.build()
        > sparse_ex.indices
        [
            [0, 0, 0]
            [0, 0, 1]
            [0, 1, 0]
            [0, 1, 1]
            [1, 0, 0]
            [1, 0, 1]
            [1, 1, 0]
            [1, 1, 1]
            [1, 2, 0]
            [1, 2, 1]
        ]
        > sparse_ex.values
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        > sparse_ex.dense_shape
        [2, 3, 2]
    """

    def __init__(self, *things):
        """Constructor.

        Args:
            things (object): A tuple of either SparseTensorBuilders, or scalars.
        """
        self.things = things

    def build(self, val_type=None):
        """Converts this builder object into a tf.SparseTensor.

        Args:
            val_type (tf.dtype): The desired dtype of the values tensor.
        """
        indices = []
        values = []
        shape = []
        self._inner_populate(indices, values, shape, 0)
        return tf.SparseTensor(
            indices=tf.constant(indices, dtype=tf.int64),
            values=tf.constant(values, dtype=val_type),
            dense_shape=tf.constant(shape, dtype=tf.int64),
        )

    def _inner_populate(self, indices, values, shape, depth, *prev_idx):
        while len(shape) <= depth:
            shape.append(0)
        if self.things and not isinstance(self.things[0], SparseTensorBuilder):
            # Leaf case: the things are scalar values.
            for i, val in enumerate(self.things):
                indices.append(prev_idx + (i,))
                values.append(val)
        else:
            # Recursive case: the things are nested builders.
            for i, val in enumerate(self.things):
                index = prev_idx + (i,)
                val._inner_populate(indices, values, shape, depth + 1, *index)
        shape[depth] = max(shape[depth], len(self.things))
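
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), assuming TF1-style
# graph mode as used elsewhere in this package; kept commented out so module
# behavior is unchanged.
#
#     class Ex(SparseTensorBuilder):
#         pass
#
#     sparse = Ex(Ex(1, 2), Ex(3)).build(val_type=tf.int32)
#     with tf.compat.v1.Session() as sess:
#         print(sess.run(tf.sparse.to_dense(sparse)))
#         # [[1 2]
#         #  [3 0]]
# ---------------------------------------------------------------------------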
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/sparse_tensor_builder.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Bbox label type.

This file also contains helper functions (some private, others public) for dealing
with individual label types typically associated with bounding box labels, such as
(front, back) markers and depth. It is possible in the future that these may become
their own standalone types if such a need arises.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
    Coordinates2D,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.tensor_transforms import (
    map_and_stack,
)


def augment_marker_labels(marker_labels, stm):
    """Augment marker labels.

    Why is the below check enough? For DriveNet, all STMs start out as 3x3 identity
    matrices M. In determining the final STM, input STMs are right multiplied
    sequentially with a flip LR STM, and a combination of translation/zoom STMs which
    use the same underlying representation. A quick matrix multiply will show you that
    applying both a translate and a zoom STM is pretty much the same as applying one
    such STM with different parameters. Furthermore, given that the parameters passed
    to get the translate and scale STMs are always positive, the end result R of
    multiplying the initial STM M by the flip LR STM x translate/zoom STM shows that
    R[0, 0] is positive if and only if no flip LR STM was applied.

    NOTE: If rotations are introduced, this reasoning is no longer sufficient.

    Args:
        marker_labels (tf.Tensor): Contains either front or back marker values.
        stm (tf.Tensor): 3x3 spatial transformation matrix with which to augment
            ``marker_labels``.

    Returns:
        augmented_marker_labels (tf.Tensor): Contains the marker values with the
            spatial transformations encapsulated by ``stm`` applied to them.
    """

    def no_flip():
        return marker_labels

    def flip():
        return tf.compat.v1.where(
            tf.equal(marker_labels, -1.0), marker_labels, 1.0 - marker_labels
        )

    with tf.control_dependencies(
        [
            tf.compat.v1.assert_equal(stm[0, 1], 0.0),
            tf.compat.v1.assert_equal(stm[1, 0], 0.0),
        ]
    ):
        augmented_marker_labels = tf.cond(
            pred=stm[0, 0] < 0.0, true_fn=flip, false_fn=no_flip
        )
        return augmented_marker_labels


def _get_begin_and_end_indices(sparse_tensor):
    """Helper function that returns the beginning and end indices per example.

    Args:
        sparse_tensor (tf.SparseTensor)

    Returns:
        begin_indices (tf.Tensor): i-th element indicates the index from which the
            i-th example's values in ``sparse_tensor`` start.
        end_indices (tf.Tensor): i-th element indicates the last index pertaining to
            the i-th example's values in ``sparse_tensor``.
        indices_index (tf.Tensor): Range representation from 0 to the number of
            examples in ``sparse_tensor``.
    """
    indices = tf.cast(sparse_tensor.indices, tf.int32)
    count_per_example = tf.math.bincount(indices[:, 0], dtype=tf.int64)
    example_count = tf.size(input=count_per_example)
    begin_indices = tf.cumsum(count_per_example, exclusive=True)
    end_indices = tf.cumsum(count_per_example)
    indices_index = tf.range(example_count)

    return begin_indices, end_indices, indices_index


def _transform_sparse(sparse_label, transformer, dtype=tf.float32):
    """Helper function to augment fields represented as tf.SparseTensor.

    Args:
        sparse_label (tf.SparseTensor): Field to transform.
        transformer (func): Signature is (index, values). ``index`` represents the
            outer-most index of the ``sparse_label`` to operate over, and ``values``
            the values corresponding to this ``index``.
        dtype (tf.dtypes.Dtype)

    Returns:
        transformed_label (tf.SparseTensor): Augmented version of ``sparse_label``.
    """
    begin_indices, end_indices, indices_index = _get_begin_and_end_indices(sparse_label)

    def apply_spatial_transform(index):
        begin_index = begin_indices[index]
        end_index = end_indices[index]
        current_values = sparse_label.values[begin_index:end_index]
        return transformer(index, current_values)

    transformed_label = tf.SparseTensor(
        values=map_and_stack(apply_spatial_transform, indices_index, dtype=dtype),
        indices=sparse_label.indices,
        dense_shape=sparse_label.dense_shape,
    )

    return transformed_label


def _augment_sparse_marker_labels(markers, transform):
    """Helper function to use in conjunction with map_and_stack to augment markers.

    These markers are specifically expected to be a tf.SparseTensor with indices over
    [Example, Frame, Object, Value].

    Args:
        markers (tf.SparseTensor): Either front or back marker.
        transform (Transform): Transform to apply.

    Returns:
        transformed_markers (tf.SparseTensor): ``markers`` as transformed by
            ``transform``.
    """

    def transformer(index, current_values):
        spatial_transform_matrix = transform.spatial_transform_matrix[index, :]
        return augment_marker_labels(
            marker_labels=current_values, stm=spatial_transform_matrix
        )

    transformed_markers = _transform_sparse(markers, transformer)

    return transformed_markers


def _augment_depth(depth, transform):
    """Helper function to use in conjunction with map_and_stack to augment object depths.

    ``depth`` is specifically expected to be a tf.SparseTensor with indices over
    [Example, Frame, Object, Value].

    Args:
        depth (tf.SparseTensor): Contains depths of objects.
        transform (Transform): Transform to apply.

    Returns:
        transformed_depth (tf.SparseTensor): ``depth`` as transformed by ``transform``.
    """

    def transformer(index, current_values):
        spatial_transform_matrix = transform.spatial_transform_matrix[index, :]
        # Zoom factor is the square root of the determinant of the left-top 2x2 corner
        # of the spatial transformation matrix.
        abs_determinant = tf.abs(tf.linalg.det(spatial_transform_matrix[:2, :2]))
        scale_factor = tf.sqrt(abs_determinant)
        return scale_factor * current_values

    transformed_depth = _transform_sparse(depth, transformer)

    return transformed_depth


def _to_ltrb(coordinate_values):
    """Helper function to make sure coordinate values are series of [L, T, R, B].

    Args:
        coordinate_values (tf.Tensor): 1-D Tensor containing series of [L, T, R, B] or
            possibly [R, T, L, B] coordinates due to LR flipping.

    Returns:
        ltrb_values (tf.Tensor): Same shape as ``coordinate_values`` but always in
            [L, T, R, B] order.
    """
    x1 = coordinate_values[::4]
    x2 = coordinate_values[2::4]
    y1 = coordinate_values[1::4]
    y2 = coordinate_values[3::4]
    xmin = tf.minimum(x1, x2)
    ymin = tf.minimum(y1, y2)
    xmax = tf.maximum(x1, x2)
    ymax = tf.maximum(y1, y2)

    ltrb_values = tf.reshape(tf.stack([xmin, ymin, xmax, ymax], axis=1), (-1,))

    return ltrb_values


# TODO(@williamz): Delete / move to types.Image2DReference or types.Session once the old
# DriveNet dataloader code is removed, and its consumers can adapt to the new dataloader
# more freely.
FRAME_FEATURES = ["frame_id"]
TARGET_FEATURES = [
    "vertices",  # Should this be named bbox_coords?
    "object_class",
    "occlusion",
    "truncation",
    "truncation_type",
    "is_cvip",
    "world_bbox_z",
    "non_facing",
    "front",
    "back",
]
ADDITIONAL_FEATURES = ["source_weight"]

_Bbox2DLabel = namedtuple(
    "Bbox2DLabel", FRAME_FEATURES + TARGET_FEATURES + ADDITIONAL_FEATURES
)


class Bbox2DLabel(_Bbox2DLabel):
    """Bbox label.

    frame_id (tf.Tensor): Frame id (str).
    vertices (Coordinates2D): Vertex coordinates for the bounding boxes. These follow
        the same definition as in Coordinates2D. However, instead of representing the
        8 coordinate values of a bounding box explicitly, it will only contain series
        of [L, T, R, B] values. This is such that consumers of this label can reliably
        use those coordinates in that order.
    object_class (tf.SparseTensor): Class names associated with each bounding box.
        TODO(@williamz): Should we have the mapped values for this or preserve the
        original values? e.g. in a SQLite export from HumanLoop, these would be
        'unknown', 'full', 'bottom', ..., but mapped to 0, 1, or 2.
    occlusion (tf.SparseTensor): Occlusion level of each bounding box. Is an int in
        {0, 1, 2}.
    truncation (tf.SparseTensor): Truncation level of each bounding box. Is a float in
        the range [0., 1.].
    truncation_type (tf.SparseTensor): An int (REALLY SHOULD BE A BOOLEAN TO BEGIN
        WITH??) that is 0 for not truncated, 1 for any form of truncation.
    is_cvip (tf.SparseTensor): Boolean tensor indicating whether a bounding box is
        the CVIP.
    world_bbox_z (tf.SparseTensor): Depth of an object.
    non_facing (tf.SparseTensor): Boolean tensor indicating whether an object (traffic
        light or road sign) is not facing us.
    front (tf.SparseTensor): Float tensor for where the front marker of an object is.
        Values are in [0., 1.] U {-1.}.
    back (tf.SparseTensor): Same as above, but for the rear marker of an object.
    """

    FRAME_FEATURES = FRAME_FEATURES
    TARGET_FEATURES = TARGET_FEATURES
    ADDITIONAL_FEATURES = ADDITIONAL_FEATURES

    def apply(self, transform, **kwargs):
        """
        Applies transformation to various bounding box level features.

        Args:
            transform (Transform): Transform to apply.

        Returns:
            (Bbox2DLabel): Transformed Bbox2DLabel.
        """
        transformed_coords = self.vertices.apply(transform)
        # To make sure downstream users can rely on the order being [L, T, R, B], we
        # need to account for possible LR-flip augmentations which would switch R
        # with L.
        new_coords = _to_ltrb(transformed_coords.coordinates.values)
        fields_to_replace = dict()
        fields_to_replace["vertices"] = Coordinates2D(
            coordinates=tf.SparseTensor(
                values=new_coords,
                indices=transformed_coords.coordinates.indices,
                dense_shape=transformed_coords.coordinates.dense_shape,
            ),
            canvas_shape=transformed_coords.canvas_shape,
        )
        if isinstance(self.front, tf.SparseTensor):  # Could be an optional label.
            fields_to_replace["front"] = _augment_sparse_marker_labels(
                self.front, transform
            )
        if isinstance(self.back, tf.SparseTensor):  # Could be an optional label.
fields_to_replace["back"] = _augment_sparse_marker_labels( self.back, transform ) if isinstance( self.world_bbox_z, tf.SparseTensor ): # Could be an optional label. fields_to_replace["world_bbox_z"] = _augment_depth( self.world_bbox_z, transform ) return self._replace(**fields_to_replace) def _filter_vertices(self, valid_indices): """Helper function for returning filtered vertices. Args: valid_indices (tf.Tensor): 1-D boolean values with which to filter labels. Returns: (Coordinates2D): Filtered bbox vertices. """ old_coords = self.vertices.coordinates old_values = tf.reshape(old_coords.values, [-1, 4]) new_values = tf.boolean_mask(tensor=old_values, mask=valid_indices) new_values = tf.reshape(new_values, [-1]) # 4 sparse indices per valid label index. valid_sparse_indices = tf.reshape( tf.stack([valid_indices for _ in range(4)], axis=1), [-1] ) new_indices = tf.boolean_mask( tensor=old_coords.indices, mask=valid_sparse_indices ) new_coords_sparse_tensor = tf.SparseTensor( values=new_values, indices=new_indices, dense_shape=old_coords.dense_shape ) return self.vertices._replace(coordinates=new_coords_sparse_tensor) def filter(self, valid_indices): """ Only keep those labels as indexed by ``valid_indices``. It is important to note that this filtering mechanism DOES NOT touch the dense_shape of the underlying tf.SparseTensor instances, as those may contain very important information. e.g. if the filtering happens to remove all objects from a particular frame, that should not bring the dense_shape's entry counting the total number of frames, as that may be used for things such as batch size, etc. Args: valid_indices (tf.Tensor): 1-D boolean values with which to filter labels. Returns: (Bbox2DLabel): Filtered Bbox2DLabel. """ filtered_features = dict() for feature_name in TARGET_FEATURES: old_tensor = getattr(self, feature_name) if feature_name == "vertices": filtered_features[feature_name] = self._filter_vertices(valid_indices) elif isinstance(old_tensor, tf.SparseTensor): # Other features expected to be tf.SparseTensor. new_values = tf.boolean_mask( tensor=old_tensor.values, mask=valid_indices ) new_indices = tf.boolean_mask( tensor=old_tensor.indices, mask=valid_indices ) filtered_features[feature_name] = tf.SparseTensor( values=new_values, indices=new_indices, dense_shape=old_tensor.dense_shape, ) return self._replace(**filtered_features) def filter_bbox_label_based_on_minimum_dims(bbox_2d_label, min_height, min_width): """Filter out entries in a label whose dimensions are less than some specified thresholds. Args: bbox_2d_label (Bbox2DLabel): Input label. min_height (float): Minimum height that an entry in ``box_2d_label`` must satisfy in order to be retained. min_width (float): Same but for width. Returns: filtered_label (Bbox2DLabel): Label with offending entries filtered out. """ coords = _to_ltrb(bbox_2d_label.vertices.coordinates.values) width = coords[2::4] - coords[::4] height = coords[3::4] - coords[1::4] valid_indices = tf.logical_and( tf.greater_equal(width, min_width), tf.greater_equal(height, min_height) ) return bbox_2d_label.filter(valid_indices=valid_indices)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/types/bbox_2d_label.py
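A standalone sketch of the [L, T, R, B] normalization that _to_ltrb above performs: after an LR flip, x1 > x2, so taking element-wise min/max restores the canonical order. The concrete coordinates and the TF1-style session are illustrative assumptions; the min/max logic is restated inline rather than importing the private helper.

import tensorflow as tf

coords = tf.constant([180.0, 32.0, 60.0, 96.0])  # [R, T, L, B] after an LR flip.
x1 = coords[::4]
y1 = coords[1::4]
x2 = coords[2::4]
y2 = coords[3::4]
# Same reduction as _to_ltrb: per-box min/max, then interleave back to 1-D.
ltrb = tf.reshape(
    tf.stack(
        [
            tf.minimum(x1, x2),
            tf.minimum(y1, y2),
            tf.maximum(x1, x2),
            tf.maximum(y1, y2),
        ],
        axis=1,
    ),
    (-1,),
)

with tf.compat.v1.Session() as sess:
    assert list(sess.run(ltrb)) == [60.0, 32.0, 180.0, 96.0]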
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for extracting random glimpses from images and labels."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import (
    TransformProcessor,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import RandomGlimpse as _RandomGlimpse


class RandomGlimpse(TransformProcessor):
    """Processor for extracting random glimpses of images and labels."""

    # Always crop the center region.
    CENTER = "center"
    # Crop at random location keeping the cropped region within original image bounds.
    RANDOM = "random"
    CROP_LOCATIONS = [CENTER, RANDOM]

    @save_args
    def __init__(self, height, width, crop_location=CENTER, crop_probability=0.5):
        """Construct a RandomGlimpse processor.

        Args:
            height (int): New height to which contents will be either cropped or
                scaled down to.
            width (int): New width to which contents will be either cropped or
                scaled down to.
            crop_location (str): Enumeration specifying how the crop location is
                selected.
            crop_probability (float): Probability at which a crop is performed.
        """
        super(RandomGlimpse, self).__init__(
            _RandomGlimpse(
                crop_location=crop_location,
                crop_probability=crop_probability,
                height=height,
                width=width,
            )
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_glimpse.py
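A minimal construction sketch for the processor above; the glimpse size and probability are illustrative values, not defaults mandated by the loader.

from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_glimpse import (
    RandomGlimpse,
)

# Crop at a random in-bounds location half of the time; otherwise the contents
# are scaled down to the glimpse size.
glimpse = RandomGlimpse(
    height=272,
    width=480,
    crop_location=RandomGlimpse.RANDOM,
    crop_probability=0.5,
)
# Like other TransformProcessors in this module, it is applied to examples via
# its process() method inside a pipeline.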
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Class for path generation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.polyline_clipper import (
    PolylineClipper,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Coordinates2DWithCounts,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Example,
    FEATURE_CAMERA,
    LABEL_MAP,
)
from nvidia_tao_tf1.core.coreobject import TAOObject, save_args
from modulus.processors import PathGenerator as MaglevPathGenerator

NUM_ATTRIBUTES_FOR_CLASS = 1


class PathGenerator(TAOObject):
    """PathGenerator loads and stores label processor configuration specific to PathNet."""

    @save_args
    def __init__(
        self,
        nclasses,
        class_name_to_id,
        equidistant_interpolation=False,
        using_invalid_path_class=False,
        prior_assignment_constraint=False,
        npath_attributes=0,
        path_attribute_name_to_id=None,
        edges_per_path=2,
    ):
        """
        Construct PathGenerator.

        Args:
            nclasses (int): Number of classes.
            class_name_to_id (dict): Contains mapping between output class name and
                output id.
            equidistant_interpolation (bool): If ``True``, interpolates points that are
                equally spaced along the path edges. If ``False``, uses logarithmic
                spacing that gets tighter with increasing rank order of the path points.
            using_invalid_path_class (bool): If ``True``, then the invalid_path class is
                being used. Every prior with no ground truth assigned is assigned to
                this class. Default: ``False``.
            prior_assignment_constraint (bool): If ``True``, enable the prior assignment
                constraint strategy to group priors to classes according to the scheme.
                If ``False``, allow no grouping of priors to classes.
                Default: ``False``.
                For details about the prior assignment constraint, please see the
                following file:
                (ai-infra/moduluspy/lib/src/generate_path_from_edges/generate_path_from_edges.cc)
                and
                (https://confluence.nvidia.com/display/DLVEH/Adding+Exit+Paths+to+Pathnet
                #AddingExitPathstoPathnet-Majorconceptintroducedforimprovement:Priorconstrainting:).
            npath_attributes (int): Number of path attributes.
            path_attribute_name_to_id (dict): Contains mapping between path attribute
                name and id.
            edges_per_path (int): Number of edges per path. 3 with center rail
                included, else 2.
""" super(PathGenerator, self).__init__() self.nclasses = nclasses self.class_name_to_id = class_name_to_id self._equidistant_interpolation = equidistant_interpolation self._polyline_clipper = PolylineClipper(vertex_count_per_polyline=-1) self._path_priors = 0 self._points_priors = 0 self._prior_assignment_constraint = prior_assignment_constraint self._using_invalid_path_class = using_invalid_path_class self._edges_per_path = edges_per_path self.npath_attributes = npath_attributes self.path_attribute_name_to_id = path_attribute_name_to_id self._validate_class_name_to_id() def _validate_class_name_to_id(self): if len(self.class_name_to_id) == 0: raise ValueError( "There should be at least one output class " "in the target encoder. Found {}".format(len(self.class_name_to_id)) ) class_names = set() class_names = {class_name for class_name in self.class_name_to_id.keys()} if len(self.class_name_to_id) > len(class_names): raise ValueError("Duplicated output class name in the target encoder.") def _zip_point_coordinates( self, x, y, scale_x=1.0, scale_y=1.0, translate_x=0, translate_y=0 ): """ Utility function for normalizing/adjusting and zipping polyline coordinates. Args: x (float): x coordinate of the vertices of the polylines. y (float): y coordinate of the vertices of the polylines. scale_x (float): Factor for normalizing the x coordinates. scale_y (float): Factor for normalizing the y coordinates. translate_x (float): amount to translate the x coordinate. translate_y (float): amount to translate the y coordinate. Returns: polylines (tf.Tensor) with the normalized/adjusted vertices. """ x *= tf.cast(scale_x, tf.float32) y *= tf.cast(scale_y, tf.float32) x -= tf.cast(translate_x, tf.float32) y -= tf.cast(translate_y, tf.float32) return tf.stack([x, y], axis=1) def _reorder_attributes(self, attributes_per_polyline, npath_attributes=0): """ Enforce an order for attributes_per_polyline. Edge attributes are followed by path attributes. generate_path_from_edges.cc expects this order. Args: attributes_per_polyline (tf.Tensor): Tensor of shape [L,1] and type tf.int32. Attributes for each polyline. Every (npath_attributes + NUM_ATTRIBUTES_FOR_CLASS) elements belong to one polyline. The number of polylines is L / (npath_attribues + NUM_ATTRIBUTES_FOR_CLASS). npath_attributes (int): The number of path attributes. Returns: attributes_per_polyline (tf.Tensor): Tensor of shape [L,1] and type tf.int32. The first L elements are edge attributes. The Nth L elements are the (N-1)th path attributes. """ if npath_attributes > 0: nattribute_groups = npath_attributes + NUM_ATTRIBUTES_FOR_CLASS # L / nattribute_groups is the number of polylines. L = tf.shape(input=attributes_per_polyline)[0] base_index = tf.cast( nattribute_groups * tf.range(L / nattribute_groups), tf.int32 ) attributes_per_polyline_reshaped = tf.reshape( attributes_per_polyline, [L / nattribute_groups, nattribute_groups] ) edge_attribute_index_within_a_polyline = tf.cast( tf.math.argmin( input=tf.math.abs(attributes_per_polyline_reshaped), axis=1 ), tf.int32, ) # path_attribute index. 
path_attribute_index_within_a_polyline = ( npath_attributes - edge_attribute_index_within_a_polyline ) edge_attribute_index_1d = ( base_index + edge_attribute_index_within_a_polyline ) path_attribute_index_1d = ( base_index + path_attribute_index_within_a_polyline ) edge_attributes = tf.gather( attributes_per_polyline, edge_attribute_index_1d ) path_attributes = tf.gather( attributes_per_polyline, path_attribute_index_1d ) attributes_per_polyline = tf.reshape( tf.stack([edge_attributes, path_attributes]), [-1] ) return attributes_per_polyline def encode_dense(self, example, priors_generator): """ Encode dense path labels as targets compatible with PathNet loss and metrics. Path labels are PolygonLabel type. Args: example (Example): Example to apply the path generation operation on. priors_generator (PriorsGenerator): PriorsGenerator object. Returns: (`Example`): Example with the encoded labels. Encoded labels are 2D tensors of type tf.float32 and shape [C, P*4+TAG+SCALING] where: C: Number of path priors. P: Number of points per prior. 4: Length of point pair for the left and right edges ( currently encoded as (left_x, left_y, right_x, right_y)) TAG: Number of classes. SCALING: Scaling factor. """ frames = example.instances[FEATURE_CAMERA] labels = example.labels[LABEL_MAP] _, _, height, width = frames.get_shape().as_list() # Encode the paths from the image. polylines = labels.polygons vertices_per_polyline = labels.vertices_per_polygon class_ids_per_polyline = labels.class_ids_per_polygon attributes_per_polyline = labels.attributes_per_polygon target = self._encode_dense( priors_generator, height, width, height, width, polylines, vertices_per_polyline, class_ids_per_polyline, attributes_per_polyline, ) labels = example.labels labels[LABEL_MAP] = target return Example(instances=example.instances, labels=labels) def _encode_dense( self, priors_generator, height, width, canvas_height, canvas_width, polylines, vertices_per_polyline, class_ids_per_polyline, attributes_per_polyline, ): image_boundaries = tf.constant( [ [0, 0], [0, canvas_height], [canvas_width, canvas_height], [canvas_width, 0], [0, 0], ], tf.float32, ) maglev_path_generator = MaglevPathGenerator( width=width, height=height, nclasses=self.nclasses, nall_priors=priors_generator.nall_priors, points_per_prior=priors_generator.points_per_prior, npath_attributes=self.npath_attributes, prior_threshold=priors_generator.prior_threshold, equal_spacing=self._equidistant_interpolation, prior_assignment_constraint=self._prior_assignment_constraint, using_invalid_path_class=self._using_invalid_path_class, edges_per_path=self._edges_per_path, ) attributes_per_polyline = self._reorder_attributes( attributes_per_polyline, self.npath_attributes ) # Clip polygons to model input boundaries. ( clipped_polylines, _, clipped_vertices_per_polyline, clipped_class_ids_per_polyline, clipped_attributes_per_polyline, ) = self._polyline_clipper.clip( polylines, vertices_per_polyline, class_ids_per_polyline, attributes_per_polyline, maintain_vertex_number=False, polygon_mask=image_boundaries, ) # Normalize the polyline before sending to label processor. 
        x, y = tf.unstack(clipped_polylines, axis=1)
        clipped_polylines = self._zip_point_coordinates(
            x, y, scale_x=1.0 / canvas_width, scale_y=1.0 / canvas_height
        )

        target = maglev_path_generator(
            clipped_polylines,
            priors_generator.priors,
            clipped_vertices_per_polyline,
            clipped_class_ids_per_polyline,
            clipped_attributes_per_polyline,
        )

        return target

    def encode_sparse(self, labels2d, priors_generator, image_shape, temporal=None):
        """
        Encode sparse path labels as targets compatible with PathNet loss and metrics.

        Path labels are Polygon2DLabel type.

        Args:
            labels2d (Polygon2DLabel): A label containing 2D polygons/polylines and
                their associated classes and attributes. The first two dimensions of
                each tensor that this structure contains should be batch/example
                followed by a frame/time dimension. The rest of the dimensions encode
                type specific information. See Polygon2DLabel documentation for
                details.
            priors_generator (PriorsGenerator): PriorsGenerator object.
            image_shape (FrameShape): Namedtuple with height, width and channels.
            temporal (int): (optional) Temporal dimension of the batch. If not
                available, inferred from labels2d.vertices.canvas_shape.height.shape.

        Returns:
            (tf.Tensor): Dense tensor with encoded labels. Encoded labels are 4D
                tensors of type tf.float32 and shape [B, T, C, P*4+TAG+SCALING] where:
                B: Batch size.
                T: Number of frames.
                C: Number of path priors.
                P: Number of points per prior.
                4: Length of point pair for the left and right edges (
                   currently encoded as (left_x, left_y, right_x, right_y))
                TAG: Number of classes.
                SCALING: Scaling factor.
        """
        assert isinstance(
            labels2d.vertices, Coordinates2DWithCounts
        ), "labels2d.vertices must be of type Coordinates2DWithCounts, was: {}".format(
            type(labels2d.vertices)
        )

        example_count = labels2d.vertices.canvas_shape.height.shape.as_list()[0]
        if temporal is None:
            max_timesteps_in_example = labels2d.vertices.canvas_shape.height.shape.as_list()[
                1
            ]
        else:
            max_timesteps_in_example = temporal

        polylines = labels2d.vertices.coordinates
        vertices_per_polyline = labels2d.vertices.vertices_count
        class_ids_per_polyline = labels2d.classes
        attributes_per_polyline = labels2d.attributes

        def _dense_wrapper(i, j):
            def _slice(spt, d):
                # There is probably a better way to do this than passing in the rank d.
                return tf.sparse.slice(
                    spt,
                    [i, j] + [0] * (d - 2),
                    [1, 1] + [spt.dense_shape[a] for a in range(2, d)],
                )

            i_polylines = tf.reshape(_slice(polylines, d=5).values, [-1, 2])
            i_class_ids_per_polyline = tf.reshape(
                _slice(class_ids_per_polyline, d=4).values, [-1]
            )
            i_attrs_per_polyline = tf.reshape(
                _slice(attributes_per_polyline, d=4).values, [-1]
            )
            i_vertices_per_polyline = tf.reshape(
                _slice(vertices_per_polyline, d=3).values, [-1]
            )

            canvas_shape = labels2d.vertices.canvas_shape
            canvas_height = canvas_shape.height[i].shape.as_list()[-1]
            canvas_width = canvas_shape.width[i].shape.as_list()[-1]

            # Encode the paths from the image.
            target = self._encode_dense(
                priors_generator,
                image_shape.height,
                image_shape.width,
                canvas_height,
                canvas_width,
                i_polylines,
                i_vertices_per_polyline,
                i_class_ids_per_polyline,
                i_attrs_per_polyline,
            )

            return target

        path_labels = []
        for i in range(example_count):
            frames = []
            for j in range(max_timesteps_in_example):
                frames.append(_dense_wrapper(i, j))
            if len(frames) > 1:
                frames = tf.stack(frames, 0)
            else:
                frames = tf.expand_dims(frames[0], 0)
            path_labels.append(frames)
        if len(path_labels) > 1:
            path_labels = tf.stack(path_labels, 0)
        else:
            path_labels = tf.expand_dims(path_labels[0], 0)

        # Set shape to keep shape inference intact.
path_label_shape = path_labels.get_shape().as_list() path_labels.set_shape( [ example_count, max_timesteps_in_example, path_label_shape[-2], path_label_shape[-1], ] ) return path_labels
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/path_generator.py
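An illustrative construction of the PathGenerator above. The class names and counts are assumptions for the sketch, not values mandated by the loader; a real setup would pass the mapping used by the PathNet spec.

from nvidia_tao_tf1.blocks.multi_source_loader.processors.path_generator import (
    PathGenerator,
)

path_generator = PathGenerator(
    nclasses=2,
    class_name_to_id={"path": 0, "exit_path": 1},  # hypothetical class mapping
    equidistant_interpolation=False,  # logarithmic point spacing
    edges_per_path=2,  # left and right edges, no center rail
)
# encode_dense()/encode_sparse() then turn polyline labels into prior-based
# targets, given a PriorsGenerator instance (and, for encode_sparse, the input
# image shape).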
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Crop processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from parameterized import parameterized import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.crop import Crop from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Example, FEATURE_CAMERA, LABEL_MAP, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object class TestCrop(ProcessorTestCase): @parameterized.expand([[0, 0, 1, 1], [1, 1, 2, 2]]) def test_valid_bounds_do_not_raise(self, left, top, right, bottom): Crop(left=left, top=top, right=right, bottom=bottom) def test_crops_in_half(self): frames = tf.ones((1, 128, 240, 3)) labels = self.make_polygon_label( vertices=[[60, 32], [180, 32], [180, 96], [60, 96]] ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: labels} ) expected_frames = tf.ones((1, 128, 120, 3)) # X coordinates become negative because cropping from the left translates the left side out # of view expected_labels = self.make_polygon_label( vertices=[[-60, 32], [60, 32], [60, 96], [-60, 96]] ) with self.test_session(): crop = Crop(left=120, top=0, right=240, bottom=128) cropped = crop.process(example) self.assertAllClose( expected_frames.eval(), cropped.instances[FEATURE_CAMERA].eval() ) self.assert_labels_close(expected_labels, cropped.labels[LABEL_MAP]) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" processor = Crop(left=120, top=0, right=240, bottom=128) processor_dict = processor.serialize() deserialized_processor = deserialize_tao_object(processor_dict) self.assertEqual( str(processor._transform), str(deserialized_processor._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/crop_test.py
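The expected labels in test_crops_in_half above follow from a pure translation: cropping with left=120 shifts every x coordinate by -120 while y is unchanged, which is why vertices move out of view on the left. A tiny restatement of that arithmetic (illustrative, not the Crop implementation):

crop_left = 120
vertices = [[60, 32], [180, 32], [180, 96], [60, 96]]
translated = [[x - crop_left, y] for x, y in vertices]
assert translated == [[-60, 32], [60, 32], [60, 96], [-60, 96]]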
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Transformation encapsulates spatial, color and canvas size changes produced by Transforms."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.core.coreobject import TAOObject, save_args


class Transformation(TAOObject):
    """Transformation encapsulates spatial, color and canvas size changes produced by Transforms."""

    @save_args
    def __init__(self, spatial_transform_matrix, color_transform_matrix, canvas_shape):
        """Construct transformation.

        Args:
            spatial_transform_matrix (Tensor): Spatial transform matrix.
            color_transform_matrix (Tensor): Color transform matrix.
            canvas_shape (Canvas2D): Shape of a 2-dimensional canvas.
        """
        super(Transformation, self).__init__()
        self.spatial_transform_matrix = spatial_transform_matrix
        self.color_transform_matrix = color_transform_matrix
        self.canvas_shape = canvas_shape
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/transformation.py
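A minimal sketch constructing an identity Transformation (no spatial or color change) for a batch of one example. It assumes Canvas2D is exported by the types package and that canvas dimensions are encoded as zero tensors whose last axis length is the size, which is what the shape-inference code elsewhere in this loader reads back; both are assumptions of the sketch.

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transformation import (
    Transformation,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import Canvas2D

transformation = Transformation(
    spatial_transform_matrix=tf.eye(3, batch_shape=[1]),  # identity: no warp
    color_transform_matrix=tf.eye(4, batch_shape=[1]),  # identity: no color shift
    # Assumed Canvas2D encoding: last axis length carries the canvas size.
    canvas_shape=Canvas2D(height=tf.zeros([1, 128]), width=tf.zeros([1, 240])),
)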
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for MultiplePolylineToPolygon processor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from parameterized import parameterized

from nvidia_tao_tf1.blocks.multi_source_loader.processors.multiple_polyline_to_polygon import (  # noqa
    MultiplePolylineToPolygon,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import (
    ProcessorTestCase,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.polygon2d_label import (
    Polygon2DLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures import (
    make_coordinates2d,
    make_tags,
)
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object


def make_labels(
    shapes_per_frame,
    height,
    width,
    coordinates_per_polygon,
    coordinate_values,
    classes,
    attributes,
):
    return Polygon2DLabel(
        vertices=make_coordinates2d(
            shapes_per_frame=shapes_per_frame,
            height=height,
            width=width,
            coordinates_per_polygon=coordinates_per_polygon,
            coordinate_values=coordinate_values,
        ),
        classes=make_tags(classes),
        attributes=make_tags(attributes),
    )


class TestMultiplePolylineToPolygon(ProcessorTestCase):
    @parameterized.expand([[tuple()], [(5,)], [(12, 2)]])
    def test_empty_labels(self, batch_args):
        input_labels = self.make_empty_polygon2d_labels(*batch_args)
        processor = MultiplePolylineToPolygon([0], [0])

        with self.session() as sess:
            transformed_labels = processor.process(input_labels)
            transformed_labels = sess.run(transformed_labels)
            input_labels = sess.run(input_labels)
            transformed_polygons = transformed_labels.vertices.coordinates
            input_polygons = input_labels.vertices.coordinates
            self.assertSparseEqual(transformed_polygons, input_polygons)
            self.assertSparseEqual(transformed_labels.classes, input_labels.classes)

    @parameterized.expand(
        [
            # Input has polylines that form a triangle in order to check if the op
            # correctly orders these polylines and combines them into a polygon.
            # The polylines are shuffled and need to be sorted in order to form a
            # triangle. For example, the expected output order is (1, 3, 2). This test
            # case covers the following scenario:
            # a) Polylines that need to be sorted to form a polygon.
            [
                [[3]],  # Shapes per frame of Input.
                [[[[1], [0], [2]]]],  # Classes
                [[[[0], [0], [0]]]],  # Attributes (same attributes so combine all).
                2,  # Input Coordinates per polygon.
                [
                    [1.0, 1.0],
                    [2.0, 4.0],
                    [3.03, 1.04],
                    [1.02, 1.04],
                    [2.02, 4.03],
                    [3.05, 1.01],
                ],  # Input Coordinate values.
                [[1]],  # Output shapes per frame.
                [[[[0]]]],  # Output Classes.
                [[[[0]]]],  # Output Attributes.
                [0],  # Attribute ids list (For attribute to class mapping)
                [0],  # Class ids list. (For attribute to class mapping)
                6,  # Output Coordinates per polygon.
                [
                    [1.0, 1.0],
                    [2.0, 4.0],
                    [2.02, 4.03],
                    [3.05, 1.01],
                    [3.03, 1.04],
                    [1.02, 1.04],
                ],  # Output Coordinate values.
            ],
            # Input has polylines in reverse order and are shuffled.
The test checks # if the op can handle the case where polylines that form the polygon # are reversed as well shuffled. The output polylines should be sorted # in correct order and reverse should be handled. This test case covers # the following scenario. (The last two polylines out of the three are # reversed.) # b) Polylines that need to be sorted and reversed. [ [[3]], # Shapes per frame of Input. [[[[1], [0], [2]]]], # Classes [[[[0], [0], [0]]]], # Attributes (same attributes so combine all). 2, # Input Coordinates per polygon. [ [1.0, 1.0], [2.0, 4.0], [1.02, 1.04], [3.03, 1.04], [3.05, 1.01], [2.02, 4.03], ], # Input Coordinate values. [[1]], # Output shapes per frame. [[[[2]]]], # Output Classes. [[[[0]]]], # Output Attributes. [0], # Attribute ids list. (For attribute to class mapping) [2], # Class ids list. (For attribute to class mapping) 6, # Output Coordinates per polygon. [ [1.0, 1.0], [2.0, 4.0], [2.02, 4.03], [3.05, 1.01], [3.03, 1.04], [1.02, 1.04], ], # Output Coordinate values. ], # Input has polylines that form 2 triangles and one polyline is common # in both the triangles. The test checks whether the output indices # forming the 2 polygons are properly sorted and polyline with # multiple attributes are handled properly. The polyline with multiple # attributes is added in the 1st as well as 2nd polygon's vertices. # c) Handle Polylines with multiple attributes (same polyline in 2 # polygons). [ [[5]], # Shapes per frame of Input. [[[[1], [0], [2], [0], [1], [1]]]], # Classes [[[[0], [0], [0, 1], [1], [1]]]], # Attributes (one polyline has 2 # attributes). 2, # Input Coordinates per polygon. [ [1.0, 1.0], [2.0, 4.0], [1.02, 1.04], [3.03, 1.04], [3.05, 1.01], [2.02, 4.03], [4.0, 3.0], [2.1, 4.1], [3.04, 1.03], [4.07, 3.09], ], # Input Coordinate values. [[2]], # Output shapes per frame. [[[[2], [3]]]], # Output Classes. [[[[1], [0]]]], # Output Attributes. [0, 1], # Attribute ids list. (For attribute to class mapping) [3, 2], # Class ids list. (For attribute to class mapping) 6, # Output Coordinates per polygon. [ [3.05, 1.01], [2.02, 4.03], [2.1, 4.1], [4.0, 3.0], [4.07, 3.09], [3.04, 1.03], [1.0, 1.0], [2.0, 4.0], [2.02, 4.03], [3.05, 1.01], [3.03, 1.04], [1.02, 1.04], ], # Output Coordinate values. ], ] ) def test_multiple_polyline_to_polygon( self, shapes_per_frame, classes, attributes, coordinates_per_polygon, coordinate_values, expected_shapes_per_frame, expected_classes, expected_attributes, attribute_ids_list, class_ids_list, expected_coordinates_per_polygon, expected_coordinate_values, ): processor = MultiplePolylineToPolygon(attribute_ids_list, class_ids_list) with self.session() as sess: input_labels = make_labels( shapes_per_frame, 5, 5, coordinates_per_polygon, coordinate_values, classes, attributes, ) expected_labels = make_labels( expected_shapes_per_frame, 5, 5, expected_coordinates_per_polygon, expected_coordinate_values, expected_classes, expected_attributes, ) transformed_labels = processor.process(input_labels) transformed_labels, expected_labels = sess.run( [transformed_labels, expected_labels] ) transformed_polygons = transformed_labels.vertices.coordinates expected_polygons = expected_labels.vertices.coordinates self.assertSparseEqual(transformed_polygons, expected_polygons) self.assertSparseEqual(transformed_labels.classes, expected_labels.classes) @parameterized.expand( [ # Input to check that the polylines are not combined when none of the # attributes are equal. The input should be returned as is to the output. 
# This test case covers the following scenario: # d) Mix of polylines with attributes and polylines without attributes. # e) None of the polylines are combined because attributes are either # missing or all unequal. [ [[3]], # Shapes per frame of Input. [[[[1], [0], [2]]]], # Classes [[[[0], [1]]]], # Attributes. [0, 1], # Attribute ids list. (To define Attribute to class mapping) [0, 0], # Class ids list. (To define Attribute to class mapping) 2, # Input Coordinates per polygon. [ [1.0, 1.0], [2.0, 4.0], [1.02, 1.04], [3.03, 1.04], [3.05, 1.01], [2.02, 4.03], ], # Input Coordinate values. ], # Input to check that the op works with multiple frames in an example. # The input coordinates are random values. The test confirms that the # op can handle input with multiple time frames in an example as well. # f) Handle data with multiple frames in an example (time dimension). [ [[3, 4], [2]], # Shapes per frame of Input. [[[[0], [2], [1]], [[1], [2], [0], [1]]], [[[1], [1]]]], # Classes. [[[[0], [1], [2]], [[1], [0], [2]]], [[[0]]]], # Attributes. [0, 1, 2], # Attribute ids list. (To define attribute to class mapping) [0, 0, 1], # Class ids list. (To define attribute to class mapping) 2, # Input Coordinates per polygon. [ [1.0, 1.0], [5.0, 3.0], [2.0, 2.0], [3.0, 3.0], [1.0, 2.0], [5.0, 4.0], [1.02, 0.5], [0.2, 0.7], [1.4, 4.1], [2.1, 3.3], [0.7, 0.8], [0.9, 1.1], [4.5, 3.4], [4.0, 4.9], [3.1, 1.2], [2.2, 2.3], [3.3, 2.2], [3.4, 6.5], ], # Input Coordinate values. ], ] ) def test_not_combined( self, shapes_per_frame, classes, attributes, attribute_ids_list, class_ids_list, coordinates_per_polygon, coordinate_values, ): processor = MultiplePolylineToPolygon(attribute_ids_list, class_ids_list) with self.session() as sess: input_labels = make_labels( shapes_per_frame, 5, 5, coordinates_per_polygon, coordinate_values, classes, attributes, ) transformed_labels = processor.process(input_labels) transformed_labels, expected_labels = sess.run( [transformed_labels, input_labels] ) expected_polygon_shape = expected_labels.vertices.coordinates.dense_shape transformed_polygon_shape = ( transformed_labels.vertices.coordinates.dense_shape ) expected_class_shape = expected_labels.classes.dense_shape transformed_class_shape = transformed_labels.classes.dense_shape expected_class_values = expected_labels.classes.values transformed_class_values = transformed_labels.classes.values # Check whether the shapes of the returned polygons and classes are equal self.assertAllEqual(transformed_polygon_shape, expected_polygon_shape) self.assertAllEqual(transformed_class_shape, expected_class_shape) # Check if number of polygons is the same as input self.assertEqual(len(transformed_class_values), len(expected_class_values)) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" processor = MultiplePolylineToPolygon( attribute_id_list=[0, 1, 2], class_id_list=[0, 0, 1] ) processor_dict = processor.serialize() deserialized_processor_dict = deserialize_tao_object(processor_dict) assert ( processor._attribute_id_list == deserialized_processor_dict._attribute_id_list ) assert processor._class_id_list == deserialized_processor_dict._class_id_list
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/multiple_polyline_to_polygon_test.py
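The two constructor lists used throughout the tests above define an attribute-to-class pairing: polylines sharing attribute attribute_id_list[i] are stitched into one polygon labeled class_id_list[i]. A tiny illustrative restatement of the pairing from the second test case (not part of the processor's API):

attribute_id_list = [0, 1]
class_id_list = [3, 2]
attribute_to_class = dict(zip(attribute_id_list, class_id_list))
# Attribute 0 yields a class-3 polygon, attribute 1 a class-2 polygon.
assert attribute_to_class == {0: 3, 1: 2}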
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RasterizeAndResize processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from mock import call, Mock, patch from parameterized import parameterized import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_LAST from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.rasterize_and_resize import ( RasterizeAndResize, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Example, FEATURE_CAMERA, LABEL_MAP, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object class TestRasterizeAndResize(ProcessorTestCase): @parameterized.expand( [ [ 0, 1, tf.image.ResizeMethod.BILINEAR, "height: 0 is not a positive number.", ], [ 1, 0, tf.image.ResizeMethod.NEAREST_NEIGHBOR, "width: 0 is not a positive number.", ], [1, 1, "that", "Unrecognized resize_method: 'that'."], ] ) def test_assertions_fail_for_invalid_arguments( self, height, width, method, expected_message ): with self.assertRaisesRegexp(ValueError, re.escape(expected_message)): RasterizeAndResize( height=height, width=width, one_hot=True, binarize=True, resize_method=method, class_count=1, ) def test_supports_channels_last(self): processor = RasterizeAndResize( height=1, width=1, one_hot=True, binarize=True, class_count=1 ) assert processor.supported_formats == [CHANNELS_LAST] def test_does_not_compose(self): processor = RasterizeAndResize( height=1, width=1, one_hot=True, binarize=True, class_count=1 ) assert not processor.can_compose(Mock()) def test_compose_raises(self): with self.assertRaises(NotImplementedError): processor = RasterizeAndResize( height=1, width=1, one_hot=True, binarize=True, class_count=1 ) processor.compose(Mock()) def test_does_not_resize_frames_when_disabled(self): frames = tf.ones((16, 128, 240, 3)) polygon = self.make_polygon_label([[120, 0.0], [240, 128], [0.0, 128]]) height = 256 width = 480 example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) processor = RasterizeAndResize( height=height, width=width, one_hot=True, binarize=True, class_count=1, resize_frames=False, ) with self.test_session(): rasterized = processor.process(example) self.assertAllEqual( rasterized.instances[FEATURE_CAMERA].eval().shape, [16, 128, 240, 3] ) @parameterized.expand([[64, 120], [252, 480], [504, 960], [1008, 1920]]) def test_resizes_frames_to_requested_size(self, new_height, new_width): frames = tf.ones((16, 128, 240, 3)) polygon = self.make_polygon_label([[120, 0.0], [240, 128], [0.0, 128]]) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) processor = RasterizeAndResize( height=new_height, width=new_width, one_hot=True, binarize=True, class_count=1, resize_frames=True, ) with self.test_session(): rasterized = 
processor.process(example) self.assertAllEqual( rasterized.instances[FEATURE_CAMERA].eval().shape, [16, new_height, new_width, 3], ) self.assertAllEqual( rasterized.labels[LABEL_MAP].eval().shape, [1, new_height, new_width, 2] ) @parameterized.expand( [ [tf.image.ResizeMethod.AREA], [tf.image.ResizeMethod.BICUBIC], [tf.image.ResizeMethod.BILINEAR], [tf.image.ResizeMethod.NEAREST_NEIGHBOR], ] ) @patch("tensorflow.image.resize", side_effect=tf.image.resize) def test_resizes_with_given_algorithms(self, method, spied_resize): frames = tf.ones((16, 128, 240, 3)) polygon = self.make_polygon_label( vertices=[[60, 32], [180, 32], [180, 96], [60, 96]] ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) processor = RasterizeAndResize( height=252, width=480, one_hot=True, binarize=True, class_count=1, resize_frames=True, resize_method=method, ) with self.test_session(): rasterized = processor.process(example) self.assertAllEqual( rasterized.instances[FEATURE_CAMERA].eval().shape, [16, 252, 480, 3] ) self.assertAllEqual( rasterized.labels[LABEL_MAP].eval().shape, [1, 252, 480, 2] ) spied_resize.assert_has_calls( [call(frames, size=(252, 480), method=method)] ) @parameterized.expand([[252, 480, 1], [252, 480, 3], [252, 480, 4], [252, 480, 7]]) def test_rasterizes_labels(self, new_height, new_width, class_count): frames = tf.ones((8, 128, 240, class_count)) polygon = self.make_polygon_label( vertices=[[60, 32], [180, 32], [180, 96], [60, 96]] ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) processor = RasterizeAndResize( height=new_height, width=new_width, one_hot=True, binarize=True, class_count=class_count, resize_frames=True, ) with self.test_session(): rasterized = processor.process(example) self.assertAllEqual( rasterized.instances[FEATURE_CAMERA].eval().shape, [8, new_height, new_width, class_count], ) self.assertAllEqual( rasterized.labels[LABEL_MAP].eval().shape, [1, new_height, new_width, class_count + 1], ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" processor = RasterizeAndResize( height=10, width=15, one_hot=True, binarize=True, class_count=3, resize_frames=True, ) processor_dict = processor.serialize() deserialized_dict = deserialize_tao_object(processor_dict) assert processor._height == deserialized_dict._height assert processor._width == deserialized_dict._width assert processor._resize_frames == deserialized_dict._resize_frames assert processor._resize_method == deserialized_dict._resize_method
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/rasterize_and_resize_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Source weight frame processors for assigning a source weight to each frame.

By assigning different source weight values to frames from different data sources, we
can treat data sources differently according to our needs. These source weight values
are ultimately used in the loss function calculation.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

import modulus.dataloader.humanloop_sqlite_dataset as hl_sql
from nvidia_tao_tf1.core.coreobject import save_args


class SourceWeightSQLFrameProcessor(hl_sql.FrameProcessor):
    """Adds additional field 'source_weight' to each frame from a specific data source."""

    @save_args
    def __init__(self, source_weight=1.0):
        """
        Init methods.

        Args:
            source_weight (float): Value by which to weight the loss for samples
                coming from this DataSource.
        """
        self.source_weight = source_weight

    def add_fields(self, example):
        """
        Add new fields to the example data structure (labels).

        Example:
            example.labels['BOX']['testfield_int'] = create_derived_field(tf.int32, shape=None)

        Args:
            example (namedtuple): data structure that the loader returns.
        """
        example.instances["source_weight"] = hl_sql.create_derived_field(
            tf.float32, shape=None
        )

    def map(self, example_col_idx, frame):
        """
        Modify or inject values into the frame.

        Args:
            example_col_idx (namedtuple): example data structure, where fields are
                integers that correspond to the index of the value in 'row'.
            frame (list): flat list of values from the database for a frame. Use
                example_col_idx to find which element corresponds to which field in
                the 'example'.

        Return:
            modified 'frame'.
        """
        if "source_weight" in example_col_idx.instances:
            col_idx = example_col_idx.instances["source_weight"]
            frame[col_idx] = self.source_weight
        return frame
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/source_weight_frame.py
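A minimal sketch of how the frame processor above injects its value, assuming the modulus humanloop dataset module is importable. The namedtuple and the flat frame row stand in for the real loader structures and are illustrative assumptions.

from collections import namedtuple

from nvidia_tao_tf1.blocks.multi_source_loader.processors.source_weight_frame import (
    SourceWeightSQLFrameProcessor,
)

# Hypothetical stand-in for the loader's example column-index structure.
ExampleColIdx = namedtuple("ExampleColIdx", ["instances"])

processor = SourceWeightSQLFrameProcessor(source_weight=2.0)
example_col_idx = ExampleColIdx(instances={"source_weight": 1})
frame = ["frame_000", 1.0]  # Flat row; index 1 is the derived source_weight slot.
# map() overwrites the source_weight slot with the processor's configured value.
assert processor.map(example_col_idx, frame) == ["frame_000", 2.0]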
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomFlip processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from mock import patch from parameterized import parameterized from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_flip import ( RandomFlip, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object from modulus.processors.augment.spatial import random_flip_matrix def _esc(message): """Escape passed in string for regular expressions.""" return re.escape(message) class TestRandomFlip(ProcessorTestCase): @parameterized.expand( [ [ -0.1, 0.5, _esc( "RandomFlip.horizontal_probability (-0.1) is not within the range [0.0, 1.0]." ), ], [ 1.1, 0.5, _esc( "RandomFlip.horizontal_probability (1.1) is not within the range [0.0, 1.0]." ), ], [ 0.5, -0.1, _esc( "RandomFlip.vertical_probability (-0.1) is not within the range [0.0, 1.0]." ), ], [ 0.5, 1.1, _esc( "RandomFlip.vertical_probability (1.1) is not within the range [0.0, 1.0]." ), ], ] ) def test_raises_on_invalid_horizontal_probability( self, horizontal_probability, vertical_probability, message ): with self.assertRaisesRegexp(ValueError, message): RandomFlip( horizontal_probability=horizontal_probability, vertical_probability=vertical_probability, ) @parameterized.expand([(0.0, 0.0), (0.5, 0.5), (1.0, 1.0)]) @patch( "modulus.processors.augment.spatial.random_flip_matrix", side_effect=random_flip_matrix, ) def test_delegates_to_random_flip_matrix( self, horizontal_probability, vertical_probability, spied_random_flip_matrix ): example = self.make_example_128x240() augmentation = RandomFlip( horizontal_probability=horizontal_probability, vertical_probability=vertical_probability, ) augmentation.process(example) spied_random_flip_matrix.assert_called_with( horizontal_probability=horizontal_probability, vertical_probability=vertical_probability, height=128, width=240, batch_size=None, ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomFlip(horizontal_probability=1.0, vertical_probability=1.0) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_flip_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from mock import MagicMock, Mock, patch, PropertyMock import pytest from nvidia_tao_tf1.blocks.multi_source_loader.data_format import ( CHANNELS_FIRST, CHANNELS_LAST, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.convert_data_format import ( ConvertDataFormat, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.pipeline import Pipeline from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Example, FEATURE_CAMERA, SequenceExample, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.convert_data_format.tf.transpose" ) def test_converts_channels_first_to_last(mocked_transpose): frames = Mock() instances = {FEATURE_CAMERA: frames} labels = {} example = Example(instances=instances, labels=labels) transposed_frames = Mock() transposed_instances = {FEATURE_CAMERA: transposed_frames} mocked_transpose.return_value = transposed_frames expected_output = Example(instances=transposed_instances, labels=labels) converter = ConvertDataFormat(CHANNELS_FIRST, CHANNELS_LAST) outputs = converter.process(example) mocked_transpose.assert_called_with(a=frames, perm=(0, 2, 3, 1)) assert expected_output == outputs @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.convert_data_format.tf.transpose" ) def test_converts_channels_last_to_first(mocked_transpose): frames = Mock() instances = {FEATURE_CAMERA: frames} labels = {} example = Example(instances=instances, labels=labels) transposed_frames = Mock() transposed_instances = {FEATURE_CAMERA: transposed_frames} mocked_transpose.return_value = transposed_frames expected_output = Example(instances=transposed_instances, labels=labels) converter = ConvertDataFormat(CHANNELS_LAST, CHANNELS_FIRST) outputs = converter.process(example) mocked_transpose.assert_called_with(a=frames, perm=(0, 3, 1, 2)) assert expected_output == outputs def test_empty_pipeline_outputs_inputs(): pipeline = Pipeline( [], input_data_format=CHANNELS_FIRST, output_data_format=CHANNELS_FIRST ) inputs = [Mock()] outputs = pipeline.process(inputs) assert outputs == inputs def test_passes_outputs_to_the_next_processor(): processor1 = Mock(supported_formats=[CHANNELS_FIRST]) processor1.can_compose.return_value = False processor1_output = Mock() processor1.process.return_value = processor1_output processor2 = Mock(supported_formats=[CHANNELS_FIRST]) processor2.can_compose.return_value = False processor2_output = Mock() processor2.process.return_value = processor2_output pipeline = Pipeline( [processor1, processor2], input_data_format=CHANNELS_FIRST, output_data_format=CHANNELS_FIRST, ) example = Mock() output = pipeline.process([example]) processor1.process.assert_called_with(example) processor2.process.assert_called_with(processor1_output) assert output == [processor2_output] 
@pytest.mark.parametrize( "input_format, output_format, conversion_count", [ [CHANNELS_FIRST, CHANNELS_FIRST, 0], [CHANNELS_FIRST, CHANNELS_LAST, 1], [CHANNELS_LAST, CHANNELS_FIRST, 1], [CHANNELS_LAST, CHANNELS_LAST, 0], ], ) @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.pipeline.ConvertDataFormat" ) def test_empty_pipeline_conversions( _MockedConvertDataFormat, input_format, output_format, conversion_count ): pipeline = Pipeline( [], input_data_format=input_format, output_data_format=output_format ) pipeline._build() assert len(pipeline._processors) == conversion_count assert _MockedConvertDataFormat.call_count == conversion_count @pytest.mark.parametrize( "input_format, processor_format, output_format, conversion_count", [ [CHANNELS_FIRST, CHANNELS_FIRST, CHANNELS_FIRST, 0], [CHANNELS_FIRST, CHANNELS_FIRST, CHANNELS_LAST, 1], [CHANNELS_FIRST, CHANNELS_LAST, CHANNELS_FIRST, 2], [CHANNELS_FIRST, CHANNELS_LAST, CHANNELS_LAST, 1], [CHANNELS_LAST, CHANNELS_FIRST, CHANNELS_FIRST, 1], [CHANNELS_LAST, CHANNELS_FIRST, CHANNELS_LAST, 2], [CHANNELS_LAST, CHANNELS_LAST, CHANNELS_FIRST, 1], [CHANNELS_LAST, CHANNELS_LAST, CHANNELS_LAST, 0], ], ) @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.pipeline.ConvertDataFormat" ) def test_single_single_format_processor_pipeline( _MockedConvertDataFormat, input_format, processor_format, output_format, conversion_count, ): convert_instance = Mock() _MockedConvertDataFormat.return_value = convert_instance processor = Mock() processor_output = Mock() processor.supported_formats = [processor_format] processor.process.return_value = processor_output pipeline = Pipeline( [processor], input_data_format=input_format, output_data_format=output_format ) pipeline._build() assert len(pipeline._processors) == (conversion_count + 1) assert _MockedConvertDataFormat.call_count == conversion_count @pytest.mark.parametrize( "input_format, output_format, conversion_count", [ [CHANNELS_FIRST, CHANNELS_FIRST, 0], [CHANNELS_FIRST, CHANNELS_LAST, 1], [CHANNELS_LAST, CHANNELS_FIRST, 1], [CHANNELS_LAST, CHANNELS_LAST, 0], ], ) @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.pipeline.ConvertDataFormat" ) def test_single_dual_format_processor( _MockedConvertDataFormat, input_format, output_format, conversion_count ): convert_instance = Mock() _MockedConvertDataFormat.return_value = convert_instance processor = Mock() processor_output = Mock() processor.supported_formats = [CHANNELS_FIRST, CHANNELS_LAST] processor.process.return_value = processor_output pipeline = Pipeline( [processor], input_data_format=input_format, output_data_format=output_format ) pipeline._build() assert len(pipeline._processors) == (conversion_count + 1) assert _MockedConvertDataFormat.call_count == conversion_count def test_does_not_convert_sequence_examples(): with patch.object(ConvertDataFormat, "process") as mocked_process: pipeline = Pipeline( [], input_data_format=CHANNELS_FIRST, output_data_format=CHANNELS_LAST ) example = SequenceExample(instances={}, labels={}) pipeline(example) assert 0 == mocked_process.call_count def test_single_composable_processor_is_not_modified(): transformer1 = Mock(supported_formats=[CHANNELS_LAST]) transformer1.can_compose.return_value = True pipeline = Pipeline( [transformer1], input_data_format=CHANNELS_LAST, output_data_format=CHANNELS_LAST, ) assert len(pipeline) == 1 assert pipeline[0] == transformer1 def test_composes_two_consecutive_processors_into_one(): transformer1 = Mock(supported_formats=[CHANNELS_LAST]) 
transformer1.can_compose.return_value = True composed = Mock(supported_formats=[CHANNELS_LAST]) composed.can_compose.return_value = False transformer1.compose.return_value = composed transformer2 = Mock(supported_formats=[CHANNELS_LAST]) transformer2.can_compose.return_value = True pipeline = Pipeline( [transformer1, transformer2], input_data_format=CHANNELS_LAST, output_data_format=CHANNELS_LAST, ) assert len(pipeline) == 2 pipeline._build() assert len(pipeline) == 1 def test_does_not_compose_last_composable_processor(): transformer1 = Mock(supported_formats=[CHANNELS_LAST]) transformer1.can_compose.return_value = False transformer2 = Mock(supported_formats=[CHANNELS_LAST]) transformer2.can_compose.return_value = False transformer3 = Mock(supported_formats=[CHANNELS_LAST]) transformer3.can_compose.return_value = True pipeline = Pipeline( [transformer1, transformer2, transformer3], input_data_format=CHANNELS_LAST, output_data_format=CHANNELS_LAST, ) assert len(pipeline) == 3 pipeline._build() assert len(pipeline) == 3 def test_does_not_compose_when_separated_by_composable(): transformer1 = MagicMock() type(transformer1).supported_formats = PropertyMock(return_value=[CHANNELS_LAST]) transformer1.can_compose.return_value = False transformer2 = MagicMock() type(transformer2).supported_formats = PropertyMock(return_value=[CHANNELS_LAST]) transformer2.can_compose.return_value = False transformer3 = MagicMock() type(transformer3).supported_formats = PropertyMock(return_value=[CHANNELS_LAST]) transformer3.can_compose.return_value = False pipeline = Pipeline( [transformer1, transformer2, transformer3], input_data_format=CHANNELS_LAST, output_data_format=CHANNELS_LAST, ) assert len(pipeline) == 3 pipeline._build() assert len(pipeline) == 3 def test_does_not_compose_processors_when_disabled(): transformer1 = Mock(supported_formats=[CHANNELS_LAST]) transformer1.can_compose.return_value = True composed = Mock(supported_formats=[CHANNELS_LAST]) composed.can_compose.return_value = False transformer1.compose.return_value = composed transformer2 = Mock(supported_formats=[CHANNELS_LAST]) transformer2.can_compose.return_value = True pipeline = Pipeline( [transformer1, transformer2], input_data_format=CHANNELS_LAST, output_data_format=CHANNELS_LAST, compose=False, ) assert len(pipeline) == 2 pipeline._build() assert len(pipeline) == 2 assert pipeline[0] == transformer1 assert pipeline[1] == transformer2 def test_serialization_and_deserialization(): """Test that it is a TAOObject that can be serialized and deserialized.""" pipeline = Pipeline( [], input_data_format=CHANNELS_LAST, output_data_format=CHANNELS_LAST, compose=False, ) pipeline_dict = pipeline.serialize() deserialized_dict = deserialize_tao_object(pipeline_dict) assert pipeline._processors == deserialized_dict._processors assert pipeline._output_data_format == deserialized_dict._output_data_format assert pipeline._input_data_format == deserialized_dict._input_data_format assert pipeline._compose == deserialized_dict._compose
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/pipeline_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Crop processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from parameterized import parameterized from nvidia_tao_tf1.blocks.multi_source_loader.processors.lossy_crop import ( LossyCrop, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( FEATURE_CAMERA, LABEL_MAP, ) from nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures import ( make_example, make_images2d, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object class TestLossyCrop(ProcessorTestCase): @parameterized.expand([[0, 0, 1, 1], [1, 1, 2, 2]]) def test_valid_bounds_do_not_raise(self, left, top, right, bottom): LossyCrop(left=left, top=top, right=right, bottom=bottom) def test_crops_in_half(self): example = make_example( 128, 240, coordinate_values=[[42.0, 26.5], [116.0, 53.5], [74.0, 40.5]] ) expected_images2d = make_images2d( example_count=1, frames_per_example=1, height=128, width=120 ) # X coordinates become negative because cropping from the left translates the left side out # of view expected_coordinates = [-78.0, 26.5, -4.0, 53.5, -46.0, 40.5] with self.test_session(): crop = LossyCrop(left=120, top=0, right=240, bottom=128) cropped = crop.process(example) self.assertAllClose( expected_images2d.images.eval(), cropped.instances[FEATURE_CAMERA].images.eval(), ) self.assertAllClose( expected_coordinates, cropped.labels[LABEL_MAP].vertices.coordinates.values, ) self.assertEqual( 120, cropped.labels[LABEL_MAP].vertices.canvas_shape.width.shape[0] ) self.assertEqual( 128, cropped.labels[LABEL_MAP].vertices.canvas_shape.height.shape[0] ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" crop = LossyCrop(left=120, top=0, right=240, bottom=128) crop_dict = crop.serialize() deserialized_crop = deserialize_tao_object(crop_dict) assert crop._left == deserialized_crop._left assert crop._top == deserialized_crop._top assert crop._right == deserialized_crop._right assert crop._bottom == deserialized_crop._bottom
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/lossy_crop_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test BboxClipper.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from parameterized import parameterized import pytest import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.bbox_clipper import ( BboxClipper, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import Bbox2DLabel from nvidia_tao_tf1.blocks.multi_source_loader.types import Coordinates2D from nvidia_tao_tf1.blocks.multi_source_loader.types import LABEL_OBJECT from nvidia_tao_tf1.blocks.multi_source_loader.types import SequenceExample from nvidia_tao_tf1.blocks.multi_source_loader.types import ( sparsify_dense_coordinates, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import TransformedExample from nvidia_tao_tf1.blocks.multi_source_loader.types import ( vector_and_counts_to_sparse_tensor, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object from modulus.types import Canvas2D from modulus.types import Example @pytest.mark.parametrize( "crop_left,crop_right,crop_top,crop_bottom", [ (1, 0, 0, 2), # crop_left > crop_right. (1, 1, 0, 2), # crop_left = crop_right. (0, 5, 7, 6), # crop_top > crop_bottom. (0, 5, 7, 7), # crop_top = crop_bottom. ], ) def test_bbox_clipper_raises(crop_left, crop_right, crop_top, crop_bottom): """Test that the appropriate error is raised when the input args are bogus.""" with pytest.raises(ValueError): BboxClipper( crop_left=crop_left, crop_right=crop_right, crop_top=crop_top, crop_bottom=crop_bottom, ) def test_bbox_clipper_process_raises_on_transformed_example(): """Test that the appropriate error is raised when a TransformedExample is supplied.""" bbox_clipper = BboxClipper() transformed_example = TransformedExample(example=None, transformation=None) with pytest.raises(ValueError): bbox_clipper.process(transformed_example) class TestBboxClipper(tf.test.TestCase): """Test BboxClipper.""" def _get_example(self, x, y, example_type=Example): """Get an example with a Bbox2DLabel for testing. Args: x (list): x coordinates. Expects a series of [xmin, xmax, xmin, xmax, ...]. y (list): corresponding y coordinates. example_type (class): One of SequenceExample, Example. Returns: Properly populated Example with a Bbox2dLabel. """ num_bboxes = tf.cast(tf.size(input=x) / 2, dtype=tf.int32) coordinates = sparsify_dense_coordinates( dense_coordinates=tf.stack([x, y], axis=1), vertex_counts_per_polygon=2 * tf.ones(num_bboxes, dtype=tf.int32), ) truncation_type = vector_and_counts_to_sparse_tensor( vector=tf.zeros(num_bboxes, dtype=tf.int32), counts=tf.ones(num_bboxes, dtype=tf.int32), ) # Initialize to empty fields. 
label_kwargs = { field_name: [] for field_name in Bbox2DLabel.TARGET_FEATURES + Bbox2DLabel.FRAME_FEATURES + Bbox2DLabel.ADDITIONAL_FEATURES } label_kwargs["vertices"] = Coordinates2D( coordinates=coordinates, canvas_shape=Canvas2D(height=tf.constant(604), width=tf.constant(960)), ) label_kwargs["truncation_type"] = truncation_type return example_type( instances=[], labels={LABEL_OBJECT: Bbox2DLabel(**label_kwargs)} ) @parameterized.expand( [ [[0.0, 1.0, 2.0, -3.0], [-4.0, 0.0, 2.0, 1.5], Example], [[0.0, 1.0, 2.0, -3.0], [-4.0, 0.0, 2.0, 1.5], SequenceExample], [[1.0 + i for i in range(8)], [2.1 + i for i in range(8)], Example], ] ) def test_no_op(self, x, y, example_type): """Test that if crop coordinates are all 0, nothing happens.""" bbox_clipper = BboxClipper(crop_left=0, crop_right=0, crop_top=0, crop_bottom=0) example = self._get_example(x=x, y=y, example_type=example_type) processed_example = bbox_clipper.process(example) with self.session() as session: original_label, output_label = session.run( [example.labels[LABEL_OBJECT], processed_example.labels[LABEL_OBJECT]] ) original_coords = original_label.vertices.coordinates.values output_coords = output_label.vertices.coordinates.values self.assertAllEqual(original_coords, output_coords) original_truncation_type = original_label.truncation_type.values output_truncation_type = output_label.truncation_type.values self.assertAllEqual(original_truncation_type, output_truncation_type) @parameterized.expand( [ # 1st bbox is half outside: should get clipped. [ [-1.0, 3.0, 4.0, 7.5], [6.0, 7.0, 8.0, 12.0], # x, y. [0.0, 6.0, 3.0, 7.0, 4.0, 8.0, 7.0, 11.0], # expected_coords. [1, 1], # expected_truncation_type. ], # End of first test case. # 1st bbox is completely outside: should not appear in output. [ [-3.0, -1.0, 4.0, 5.0], [6.0, 7.0, 8.0, 9.0], # x, y. [4.0, 8.0, 5.0, 9.0], # expected_coords. [0], # expected_truncation_type. ], # End of second test case. ] ) def test_process(self, x, y, expected_coords, expected_truncation_type): """Test that process does what it advertises (when it is not a no-op).""" bbox_clipper = BboxClipper( crop_left=0, crop_right=7, crop_top=0, crop_bottom=11 ) example = self._get_example(x=x, y=y) processed_example = bbox_clipper.process(example) with self.session() as session: output_label = session.run(processed_example.labels[LABEL_OBJECT]) # Check coordinates. self.assertAllEqual(output_label.vertices.coordinates.values, expected_coords) # Check truncation_type. self.assertAllEqual( output_label.truncation_type.values, expected_truncation_type ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" bbox_clipper = BboxClipper( crop_left=0, crop_right=7, crop_top=0, crop_bottom=11 ) bbox_clipper_dict = bbox_clipper.serialize() deserialized_dict = deserialize_tao_object(bbox_clipper_dict) self.assertAllEqual(bbox_clipper._crop_left, deserialized_dict._crop_left) self.assertAllEqual(bbox_clipper._crop_right, deserialized_dict._crop_right) self.assertAllEqual(bbox_clipper._crop_bottom, deserialized_dict._crop_bottom) self.assertAllEqual(bbox_clipper._crop_top, deserialized_dict._crop_top)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/bbox_clipper_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for instance_mapper.py""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.instance_mapper import ( InstanceMapper, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel import nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures as fixtures classes = [ [ [ ["drivable-space"], ["vehicle:car"], ["vehicle:truck"], ["vehicle:Car "], ["person group"], ["avlp-person:person"], ["avlp-person:Person"], ] ], [ [ ["drivable-space"], ["avlp-person:person"], ["avlp-person:Person"], ["vehicle:Car "], ["person group"], ["avlp-person:person "], ["avlp-person:Person"], ] ], ] attributes = [ [[[], ["object_1"], ["object_1"], ["object_1"], [], ["object_1"], ["object_1"]]], [[["object_1"], ["object_2"], ["object_2"], [], [], ["object_1"], ["object_1"]]], ] except_class = set(["drivable-space", "group"]) class TestInstanceMapper(tf.test.TestCase): def test_instance_mapping(self): with self.cached_session() as session: mapper = InstanceMapper( exceptions=except_class, default_has_instance=True, default_instance_id=0, ) polygon_2d_label = Polygon2DLabel( vertices=None, # vertices currently don't matter classes=fixtures.make_tags(classes), attributes=fixtures.make_tags(attributes), ) mapped_polygon_2d_label = mapper(polygon_2d_label) session.run(tf.compat.v1.tables_initializer()) self.assertAllEqual( [0, 1, 2, 1, 0, 3, 3, 0, 1, 1, 2, 0, 3, 3], mapped_polygon_2d_label.classes.values, ) def test_instance_mapping_reverse(self): with self.cached_session() as session: mapper = InstanceMapper( exceptions=except_class, default_has_instance=False, default_instance_id=-1, ) polygon_2d_label = Polygon2DLabel( vertices=None, # vertices currently don't matter classes=fixtures.make_tags(classes), attributes=fixtures.make_tags(attributes), ) mapped_polygon_2d_label = mapper(polygon_2d_label) session.run(tf.compat.v1.tables_initializer()) self.assertAllEqual( [0, -1, -1, -1, 1, -1, -1, 0, -1, -1, -1, 1, -1, -1], mapped_polygon_2d_label.classes.values, )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/instance_mapper_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for clipping polylines representing priors or path labels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
from tensorflow.compat.v1 import py_func

from nvidia_tao_tf1.core.coreobject import TAOObject, save_args
from nvidia_tao_tf1.core.processors import ClipPolygon


class PolylineClipper(TAOObject):
    """Clip priors and labels to image boundaries."""

    @save_args
    def __init__(self, vertex_count_per_polyline=-1):
        """
        Initialize the clipper processor.

        Args:
            vertex_count_per_polyline (int): Only used for the priors; not needed for
                path labels.
        """
        self._vertex_count_per_polyline = vertex_count_per_polyline
        self._clipper = ClipPolygon(closed=False)
        super(PolylineClipper, self).__init__()

    def _enforce_bottom_up_vertex_order(self, polylines):
        """
        Enforce that vertices are ordered from bottom to top of the image (always decreasing y).

        The clipper can cause this ordering to be reversed. This is only important for the
        priors, as only the ground truths are reordered in the path generator op.

        The code below only works when the number of vertices is equal and maintained in
        each path.

        Args:
            polylines (N x 2 tf.Tensor): Vertices of the polylines.

        Returns:
            reordered_polylines (N x 2 tf.Tensor): Vertices of the correctly ordered polylines.
        """
        polylines_reshaped = tf.reshape(
            tf.expand_dims(polylines, axis=-1), [-1, self._vertex_count_per_polyline, 2]
        )
        polylines_to_reverse = tf.less(
            polylines_reshaped[:, 0, 1],
            polylines_reshaped[:, self._vertex_count_per_polyline - 1, 1],
        )
        polylines_to_reverse = tf.tile(
            tf.expand_dims(polylines_to_reverse, axis=1),
            [1, self._vertex_count_per_polyline],
        )
        polylines_to_reverse = tf.tile(
            tf.expand_dims(polylines_to_reverse, axis=2), [1, 1, 2]
        )
        polylines_reordered = tf.compat.v1.where(
            polylines_to_reverse,
            tf.reverse(polylines_reshaped, [1]),
            polylines_reshaped,
        )
        return tf.reshape(polylines_reordered, tf.shape(input=polylines))

    def _resample_shortened_polylines(
        self,
        polylines,
        vertices_per_polyline,
        expected_vertices_per_polyline,
        polyline_index_map,
    ):
        """
        Check for polylines that changed number of vertices after clipping.

        The number of vertices in a polyline can change after clipping if the clip occurs
        between the second and (n-1)st vertex. This is only a concern when clipping
        polylines that are intended to have specific vertex counts, like priors.

        Currently this function simply repeats the last vertex until the required number
        of vertices is reached.

        Args:
            polylines (N x 2 np.Tensor): Vertices of the polylines.
            vertices_per_polyline (N x 1 np.Tensor): Number of vertices for each polyline.
            expected_vertices_per_polyline (N x 1 np.Tensor): Expected number of vertices
                for each polyline.
            polyline_index_map (N x 1 np.Tensor): Original id of polylines.

        Returns:
            resampled_polylines (M x 2 np.Tensor): Vertices of the resampled polylines.
            resampled_vertices (M x 1 np.Tensor): Number of vertices for each resampled
                polyline.
        """
        # TODO(blythe): Refactor to tensorflow functions.
        resampled_polylines = np.empty((0, 2), dtype=np.float32)
        resampled_vertices = np.empty((0,), dtype=np.int32)

        # Ensure that the number of expected vertices is always greater or equal
        # to the number of current vertices.
        assert not np.where(
            vertices_per_polyline > expected_vertices_per_polyline[polyline_index_map]
        )[
            0
        ], "Requesting to subtract vertices which is not a valid operation for this processor."

        # First just check that there is a polyline with a vertex number change.
        polyline_needs_resampling = np.where(
            vertices_per_polyline != expected_vertices_per_polyline[polyline_index_map]
        )[0]
        if polyline_needs_resampling.size != 0:
            # Count up the number of vertices before each polyline starts.
            vertex_counts = np.cumsum(np.hstack((0, vertices_per_polyline)))
            for path in range(len(vertices_per_polyline)):
                start_vertex = vertex_counts[path]
                end_vertex = start_vertex + vertices_per_polyline[path]

                # Repeat last vertex additional_vertices times to regain the right
                # number of vertices.
                # TODO(blythe): Use interpolation to resample vertices rather than just repetition.
                additional_vertices = (
                    expected_vertices_per_polyline[polyline_index_map[path]]
                    - vertices_per_polyline[path]
                )
                resampled_polyline = polylines[start_vertex:end_vertex, :]
                resampled_polyline = np.vstack(
                    (
                        resampled_polyline,
                        np.tile(polylines[end_vertex - 1, :], (additional_vertices, 1)),
                    )
                )

                resampled_polylines = np.vstack(
                    (resampled_polylines, resampled_polyline)
                )
                resampled_vertices = np.hstack(
                    (resampled_vertices, np.int32(resampled_polyline.shape[0]))
                )
        else:
            # If there are no splits, just pass the original input.
            resampled_polylines = polylines
            resampled_vertices = vertices_per_polyline

        return resampled_polylines, resampled_vertices

    def _remove_split_polylines(
        self, polylines, vertices_per_polyline, polyline_index_map
    ):
        """
        Check for split polylines and keep the segment closest to the bottom of the image.

        Split polylines can occur when a labeled polyline is clipped to within the image
        boundaries, but the original polyline has multiple segments that result from the
        clipping. This function chooses only one segment from the original polyline to
        train on. This means that this function may receive N polylines, but only return
        M polylines, where M <= N.

        Args:
            polylines (N x 2 np.Tensor): Vertices of the polylines.
            vertices_per_polyline (N x 1 np.Tensor): Number of vertices for each polyline.
            polyline_index_map (N x 1 np.Tensor): Original id of polylines.

        Returns:
            unsplit_polylines (M x 2 np.Tensor): Vertices of the unsplit polylines.
            unsplit_vertices (M x 1 np.Tensor): Number of vertices for each unsplit polyline.
            unsplit_index_map (M x 1 np.Tensor): Original id of unsplit polylines.
        """
        # TODO(blythe): Refactor to tensorflow functions.
        unsplit_polylines = np.empty((0, 2), dtype=np.float32)
        unsplit_vertices = np.empty((0,), dtype=np.int32)
        unsplit_index_map = np.empty((0,), dtype=np.int32)

        # First just check that there is a split polyline.
        if np.unique(polyline_index_map).shape[0] < polyline_index_map.shape[0]:
            # Count up the number of vertices before each polyline starts.
            vertex_counts = np.cumsum(np.hstack((0, vertices_per_polyline)))

            # For each of the paths, determine whether there is a split or not.
            for path in np.unique(polyline_index_map):
                # Check if this particular polyline has a split or not.
if len(np.where(polyline_index_map == path)[0]) > 1: # For the split polylines, find and keep only the polyline segment closest # to the bottom of the image. (y increases as you get closer to the # bottom of the image) max_y = -np.Inf polylines_to_keep = [] vertices_to_keep = [] for segment_num in np.where(polyline_index_map == path)[0]: start_vertex = vertex_counts[segment_num] end_vertex = start_vertex + vertices_per_polyline[segment_num] segment = polylines[start_vertex:end_vertex, :] if np.max(segment[:, 1]) > max_y: max_y = np.max(segment[:, 1]) polylines_to_keep = segment vertices_to_keep = vertices_per_polyline[segment_num] index_to_keep = polyline_index_map[segment_num] unsplit_polylines = np.vstack( (unsplit_polylines, polylines_to_keep) ) unsplit_vertices = np.hstack((unsplit_vertices, vertices_to_keep)) unsplit_index_map = np.hstack((unsplit_index_map, index_to_keep)) else: # For the polylines without a split, just keep the polyline and vertex number. segment_num = np.where(polyline_index_map == path)[0][0] start_vertex = vertex_counts[segment_num] end_vertex = start_vertex + vertices_per_polyline[segment_num] unsplit_polylines = np.vstack( (unsplit_polylines, polylines[start_vertex:end_vertex, :]) ) unsplit_vertices = np.hstack( (unsplit_vertices, vertices_per_polyline[segment_num]) ) unsplit_index_map = np.hstack( (unsplit_index_map, polyline_index_map[segment_num]) ) else: # If there are no splits, just pass the original input. unsplit_polylines = polylines unsplit_vertices = vertices_per_polyline unsplit_index_map = polyline_index_map return unsplit_polylines, unsplit_vertices, unsplit_index_map def _gather_clipped_attributes( self, attributes, clipped_index, class_ids_per_polyline ): """ Gather the attributes at the corresponding clipped_index. Args: attributes (tf.Tensor): 1D tensor of shape len(class_ids_per_polyline) * (npath_attributes + 1). clipped_index (tf.Tensor): 1D tensor with length of N holding the index of interest of the first attribute group. class_ids_per_polyline (tf.Tensor): 1D tensor, class ids for each polyline. Returns: attributes (tf.Tensor): Tensor of shape [1, len(clipped_index)* nattribute_groups]. """ def _clip_attributes(attributes, clipped_index, nattribute_groups): ncolumns = tf.reshape(tf.shape(input=clipped_index)[0], [1]) base_index = tf.reshape( tf.tile( tf.reshape(tf.range(nattribute_groups), [nattribute_groups, 1]), [1, ncolumns[0]], ), [-1], ) clipped_index = tf.tile(clipped_index, [nattribute_groups]) index = tf.stack([base_index, clipped_index], axis=-1) reshaped_attributes = tf.reshape(attributes, [nattribute_groups, -1]) clipped_attributes = tf.gather_nd(reshaped_attributes, index) return clipped_attributes nattribute_groups = tf.cond( pred=tf.equal(tf.shape(input=class_ids_per_polyline)[0], 0), true_fn=lambda: 1, false_fn=lambda: tf.compat.v1.div( tf.shape(input=attributes)[0], tf.shape(input=class_ids_per_polyline)[0] ), ) clipped_attributes = tf.cond( pred=tf.equal(nattribute_groups, 1), true_fn=lambda: tf.gather(attributes, clipped_index), false_fn=lambda: _clip_attributes( attributes, clipped_index, nattribute_groups ), ) return clipped_attributes def clip( self, polylines, vertices_per_polyline, class_ids_per_polyline, attributes_per_polyline, maintain_vertex_number, polygon_mask, ): # noqa: D405 (pydocstring bug) """ Clip the polylines to the model input boundaries. Args: polylines (tf.Tensor): Tensor of shape [N,2] and type tf.float32. Vertices of the polylines. 
            vertices_per_polyline (tf.Tensor): Tensor of shape [L,1] and type tf.float32.
                Number of vertices for each polyline.
            class_ids_per_polyline (tf.Tensor): Tensor of shape [L,1] and type tf.float32.
                Class ids for each polyline.
            attributes_per_polyline (tf.Tensor): Tensor of shape [L,1] and type tf.float32.
                Attributes for each polyline.
            maintain_vertex_number (bool): True if expecting the same number of vertices
                out as in.
            polygon_mask (tf.Tensor): Tensor of shape [5, 2] and type tf.float32. Contains
                the 4 corners (first one repeated) of the cropping boundary for the
                polylines. Points start at 0, 0 and proceed counter-clockwise around the
                border, ending at 0, 0 also.

        Returns:
            clipped_polylines (tf.Tensor): Tensor of shape [M,2] and type tf.float32.
                Vertices of the clipped polylines.
            clipped_polyline_index_map (tf.Tensor): Tensor of shape [M,1] and type tf.float32.
                Index of surviving polylines into the original polyline tensor.
            clipped_vertices_per_polyline (tf.Tensor): Tensor of shape [M,1] and type
                tf.float32. Number of vertices for each clipped polyline.
            clipped_class_ids_per_polyline (tf.Tensor): Tensor of shape [L,1] and type
                tf.float32. Class ids for each clipped polyline.
            clipped_attributes_per_polyline (tf.Tensor): Tensor of shape [L,1] and type
                tf.float32. Attributes for each clipped polyline.
        """
        (
            clipped_polylines,
            clipped_vertices_per_polyline,
            clipped_polyline_index_map,
        ) = self._clipper(
            polygons=polylines,
            points_per_polygon=vertices_per_polyline,
            polygon_mask=polygon_mask,
        )

        # Check for and handle the case where the number of polylines increases due to
        # splitting. Keep only the polyline segment closest to the bottom of the image.
        clipped_polylines, clipped_vertices_per_polyline, clipped_polyline_index_map = py_func(
            self._remove_split_polylines,
            [
                clipped_polylines,
                clipped_vertices_per_polyline,
                clipped_polyline_index_map,
            ],
            [tf.float32, tf.int32, tf.int32],
        )

        # Check for and handle the case where the number of vertices per polyline changes
        # due to splitting. Only modify the vertices if we need to maintain the vertex
        # number, as indicated by the maintain_vertex_number input.
        if maintain_vertex_number:
            clipped_polylines, clipped_vertices_per_polyline = py_func(
                self._resample_shortened_polylines,
                [
                    clipped_polylines,
                    clipped_vertices_per_polyline,
                    vertices_per_polyline,
                    clipped_polyline_index_map,
                ],
                [tf.float32, tf.int32],
            )
            clipped_polylines = self._enforce_bottom_up_vertex_order(clipped_polylines)

        clipped_polylines.set_shape([clipped_polylines.shape[0], 2])

        # Check for and handle the case where the number of polylines decreases due to
        # being out of bounds. We just need to handle the metadata in this case.
        clipped_class_ids_per_polyline = tf.gather(
            class_ids_per_polyline, clipped_polyline_index_map
        )
        clipped_attributes_per_polyline = self._gather_clipped_attributes(
            attributes_per_polyline, clipped_polyline_index_map, class_ids_per_polyline
        )

        return (
            clipped_polylines,
            clipped_polyline_index_map,
            clipped_vertices_per_polyline,
            clipped_class_ids_per_polyline,
            clipped_attributes_per_polyline,
        )
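# ---------------------------------------------------------------------------
# Illustrative usage sketch (a minimal sketch, not part of the original
# module). Assumptions: the nvidia_tao_tf1 ClipPolygon op is available in a
# TF1 session; integer tensors are accepted for the per-polyline counts/ids
# (the docstring above nominally says tf.float32); and the crop-window corner
# ordering follows the `polygon_mask` description in `clip`.
if __name__ == "__main__":
    # Two 3-vertex polylines flattened into one [N, 2] tensor; the second one
    # starts outside a 64 x 128 (width x height) crop window.
    example_polylines = tf.constant(
        [
            [10.0, 100.0], [20.0, 60.0], [30.0, 20.0],
            [-5.0, 90.0], [15.0, 50.0], [25.0, 10.0],
        ],
        dtype=tf.float32,
    )
    example_vertex_counts = tf.constant([3, 3], dtype=tf.int32)
    example_class_ids = tf.constant([0, 1], dtype=tf.int32)
    example_attributes = tf.constant([0, 0], dtype=tf.int32)
    # 4 corners of the crop window, first corner repeated to close the loop.
    example_mask = tf.constant(
        [[0.0, 0.0], [0.0, 128.0], [64.0, 128.0], [64.0, 0.0], [0.0, 0.0]],
        dtype=tf.float32,
    )

    example_clipper = PolylineClipper(vertex_count_per_polyline=3)
    example_outputs = example_clipper.clip(
        example_polylines,
        example_vertex_counts,
        example_class_ids,
        example_attributes,
        maintain_vertex_number=True,
        polygon_mask=example_mask,
    )
    with tf.compat.v1.Session() as sess:
        for name, value in zip(
            ["polylines", "index_map", "vertex_counts", "class_ids", "attributes"],
            sess.run(example_outputs),
        ):
            print(name, value)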
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/polyline_clipper.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Class for mapping objects to output unique instance ids.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors import sparse_generators from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import ( Coordinates2D, ) from nvidia_tao_tf1.core.coreobject import save_args from nvidia_tao_tf1.core.processors import Processor def make_indices(sub_index, num_vertices): """Make indices for vertices sparse tensor. Args: sub_index (list of int): Sub-index associated with the polyline. num_vertices (int): Total number of vertices in the polyline. Returns: indices (list of list of int): Indices for sparse tensor. """ indices = [] for vert_index in range(num_vertices): indices.append(sub_index + [vert_index, 0]) indices.append(sub_index + [vert_index, 1]) return indices def line_points(point1, point2): """Create a point list for chord joining point1 and point2. Args: point1 (list): 2D point [x, y]. point2 (list): 2D point [x, y]. Returns: pnt_list (list of vertices([x, y])): List of points joining point1 and point2. """ flag_rev = 0 # Find the minimum point between the two given points if (int(point1[0]) == int(point2[0]) and point1[1] > point2[1]) or ( point1[0] > point2[0] ): flag_rev = 1 # range_columns defines the number of columns in the image # that this line segment crosses range_columns = range(int(point1[0]), int(point2[0]), -1 if flag_rev else 1) point_list = [] if len(range_columns) == 0: # Both the columns are adjacent or lie on the same column, add points with # varying y values range_rows = range(int(point1[1]), int(point2[1]), -1 if flag_rev else 1) for x in range_rows: point_list.append([float(point1[0]), float(x)]) else: # Form the line equation and create a point list if point2[0] != point1[0]: slope = float(point2[1] - point1[1]) / float(point2[0] - point1[0]) const = point1[1] - (slope * point1[0]) else: slope = None const = None for x in range_columns: if slope is not None and const is not None: y = slope * float(x) + const point_list.append([float(x), y]) return point_list def sparse_to_dense_polyline(values, indices, shape, polyline_prefix_size): """Create dense polylines from sparse polylines. The function creates a dense polyline by calculating y for every integer x between the first vertex and last vertex of every polyline. Args: values (array of float): x and y values of vertices in the polyline. indices (array of array of int): Indices of the vertices sparse tensor. shape (array of int): Shape of the corresponding dense tensor. polyline_prefix_size (int): Number of indices columns that uniquely identify every polyline. Return: final_values: (array of float): x and y values of vertices in the polyline. 
        final_indices (2D array of int): Indices of the output sparse tensor.
        final_shape (array of int): Shape of the dense output tensor.
    """
    final_indices = []
    final_values = []
    max_no_vertices = 0

    # Iterate through every polyline in the input sparse tensor and make the
    # polyline dense.
    for (polyline_sub_index, polyline_values, _) in sparse_generators.sparse_generator(
        polyline_prefix_size, values, indices
    ):
        total_list = []

        # Since polyline values contains x and y of every vertex, it should be even
        # in length.
        assert (
            len(polyline_values) % 2 == 0
        ), "Polyline values array should have even length."

        # Iterate through the vertices of the polyline and populate vertices for
        # every missing integer x value between any two vertices.
        for vert_ind in range(0, len(polyline_values), 2):
            point1, point2 = None, None

            # Populate point1 only if y value (vert_ind + 1) is still within bounds.
            if (vert_ind + 1) < len(polyline_values):
                point1 = [polyline_values[vert_ind], polyline_values[vert_ind + 1]]

            # Populate point2 only if x value (vert_ind + 2) and y value (vert_ind + 3)
            # are still within bounds.
            if (vert_ind + 2) < len(polyline_values) and (vert_ind + 3) < len(
                polyline_values
            ):
                point2 = [polyline_values[vert_ind + 2], polyline_values[vert_ind + 3]]

            # If point1 and point2 are both populated, then find the vertices
            # between them.
            if (point1 is not None) and (point2 is not None):
                total_list = total_list + line_points(point1, point2)

            # If point1 is populated and point2 is not, just add point1 to the total list
            # of points.
            if (point1 is not None) and (point2 is None):
                total_list = total_list + [point1]

            # Add the last point to the list if it is not None.
            if point2 is not None:
                total_list = total_list + [point2]

        # Total vertices for the current polyline.
        total_vertices = len(total_list)

        # Track the maximum number of vertices seen across all polylines.
        if max_no_vertices < total_vertices:
            max_no_vertices = total_vertices

        # Flatten the total_list of vertices from this form
        # [[x1, y1], [x2, y2], [x3, y3], ...] to [x1, y1, x2, y2, x3, y3, ...] form.
        dense_polyline_values = [coord for point in total_list for coord in point]

        # Create indices for the sparse tensor representing this polyline.
        dense_polyline_indices = make_indices(polyline_sub_index, total_vertices)

        # Add this polyline to the final polyline list.
        final_values += dense_polyline_values
        final_indices += dense_polyline_indices

    # Create final shape and arrays to return.
    final_shape = [shape[0], shape[1], shape[2], max_no_vertices, shape[4]]
    final_indices = np.array(final_indices, dtype=np.int64)
    final_values = np.array(final_values, np.float32)
    final_shape = np.array(final_shape, np.int64)

    return final_values, final_indices, final_shape


class SparseToDensePolyline(Processor):
    """SparseToDensePolyline processor creates a denser polyline from a sparse polyline.

    This processor will linearly interpolate vertices between all the adjacent points
    in a polyline. The resulting label will be a dense polyline with vertices for
    every integer x between the two connecting points.
    """

    @save_args
    def __init__(self, **kwargs):
        """Construct a SparseToDensePolyline processor."""
        super(SparseToDensePolyline, self).__init__(**kwargs)

    def call(self, polygon_2d_label):
        """Create dense polyline from sparse polyline.

        Args:
            polygon_2d_label (Polygon2DLabel): A label containing 2D polygons and their
                associated classes and attributes.

        Returns:
            (Polygon2DLabel): The label with vertices linearly interpolated so that each
                polyline has a vertex at every integer x between its end points. Classes
                and attributes are passed through unchanged.
""" polyline_prefix_size = 3 vertices = polygon_2d_label.vertices.coordinates # Create sparse to dense polyline. dense_vertices_values, dense_vertices_indices, dense_vertices_shape = tf.compat.v1.py_func( sparse_to_dense_polyline, [ vertices.values, vertices.indices, vertices.dense_shape, polyline_prefix_size, ], [tf.float32, tf.int64, tf.int64], ) # Create final vertices as Coordinates2D final_vertices = Coordinates2D( coordinates=tf.SparseTensor( values=dense_vertices_values, indices=dense_vertices_indices, dense_shape=dense_vertices_shape, ), canvas_shape=polygon_2d_label.vertices.canvas_shape, ) return Polygon2DLabel( vertices=final_vertices, classes=polygon_2d_label.classes, attributes=polygon_2d_label.attributes, )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/sparse_to_dense_polyline.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor for applying random zoom augmentations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import (
    TransformProcessor,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import RandomZoom as _RandomZoom


class RandomZoom(TransformProcessor):
    """Augmentation processor that randomly zooms in/out of images and labels."""

    @save_args
    def __init__(self, ratio_min=0.5, ratio_max=1.5, probability=1.0):
        """Construct a RandomZoom processor.

        Args:
            ratio_min (float): The lower bound of the zooming ratio's uniform
                distribution. A zooming ratio of 1.0 will not affect the image, while
                values higher than 1 will result in 'zooming out' (image gets rendered
                smaller than the canvas), and vice versa for values below 1.0.
            ratio_max (float): The upper bound of the zooming ratio's uniform
                distribution. A zooming ratio of 1.0 will not affect the image, while
                values higher than 1 will result in 'zooming out' (image gets rendered
                smaller than the canvas), and vice versa for values below 1.0.
            probability (float): Probability at which the zoom transformation is
                applied. Defaults to 1.0 (always applied).
        """
        super(RandomZoom, self).__init__(_RandomZoom(ratio_min, ratio_max, probability))
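# ---------------------------------------------------------------------------
# Illustrative usage sketch (an assumption about the surrounding data-loader
# pipeline; `example` stands for a batched SequenceExample produced by the
# loader, and `process` is the standard processor entry point used elsewhere
# in this package):
#
#     zoom = RandomZoom(ratio_min=0.8, ratio_max=1.2, probability=0.5)
#     augmented = zoom.process(example)
#
# Ratios below 1.0 zoom in (the image is rendered larger than the canvas),
# ratios above 1.0 zoom out, and `probability` controls how often the
# transform is applied at all.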
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_zoom.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ClassAttributeMapper benchmark suite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from parameterized import parameterized
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.class_attribute_mapper import (
    ClassAttributeMapper,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures import (
    make_polygon2d_label,
)
from modulus.utils import test_session


class ClassAttributeMapperBenchmark(tf.test.Benchmark):
    """ClassAttributeMapper benchmark suite."""

    ITERATIONS = 100

    def _benchmark_class_attribute_mapper(
        self,
        sess,
        class_mapping_count,
        match_class_count,
        match_attribute_count,
        example_count,
        frame_count,
        polygon_count,
        attribute_count,
    ):
        """
        Build class_attribute_mapper and polygon_2d_label.

        Args:
            sess (tf.Session()): Session to run the benchmark.
            class_mapping_count (int): Number of classes in the class_mapping table.
            match_class_count (int): Number of class names in each match_any_class row.
            match_attribute_count (int): Number of attribute names in each
                match_all/any_attribute row.
            example_count (int): Number of examples in each batch.
            frame_count (int): Number of frames in each example.
            polygon_count (int): Number of polygons in each image.
            attribute_count (int): Number of attributes in each polygon (class).
        """
        # Build class mapping table.
        class_mapping = []
        for index in range(class_mapping_count):
            _class = {}
            _class["match_any_class"] = ["path" for _ in range(match_class_count)]
            _class["match_any_attribute"] = [
                "edge" for _ in range(match_attribute_count)
            ]
            _class["match_all_attributes"] = [
                "EDGE" for _ in range(match_attribute_count)
            ]
            _class["class_name"] = "class_{}".format(index)
            _class["class_id"] = index
            class_mapping.append(_class)

        # Attribute mapping is implemented as a lookup table, so it should be fast.
        attribute_mapping = [{"name": "attr1", "id": 1}, {"name": "attr2", "id": 2}]

        # Build polygon2d_label.
shapes_per_frame = [] for _ in range(example_count): shapes_per_frame.append([polygon_count for _ in range(frame_count)]) attributes = ["edge" for _ in range(attribute_count)] labels2d = make_polygon2d_label( shapes_per_frame=shapes_per_frame, shape_classes=["path"], shape_attributes=attributes, height=940, width=504, coordinates_per_polygon=3, ) mapper = ClassAttributeMapper( class_mapping, "Default", -1, attribute_mapping, -1 ) mappered_labels2d = mapper(labels2d) run_op = tf.group( mappered_labels2d.classes.values, mappered_labels2d.attributes.values ) sess.run(tf.compat.v1.tables_initializer()) self.run_op_benchmark( sess=sess, op_or_tensor=run_op, min_iters=self.ITERATIONS, store_trace=True, store_memory_usage=True, ) @parameterized.expand( [ [1, "/cpu:0"], [8, "/cpu:0"], [16, "/cpu:0"], [32, "/cpu:0"], [1, "/gpu:0"], [8, "/gpu:0"], [16, "/gpu:0"], [32, "/gpu:0"], ] ) def benchmark_attribute_mapper_example_count(self, example_count, device_placement): """Benchmark different numbers of examples.""" print("example_count {} device {}.".format(example_count, device_placement)) with tf.device(device_placement), test_session( allow_soft_placement=True ) as sess: self._benchmark_class_attribute_mapper( sess=sess, class_mapping_count=10, match_class_count=10, match_attribute_count=20, example_count=example_count, frame_count=3, polygon_count=5, attribute_count=3, ) @parameterized.expand( [ [1, "/cpu:0"], [10, "/cpu:0"], [50, "/cpu:0"], [100, "/cpu:0"], [1, "/gpu:0"], [10, "/gpu:0"], [50, "/gpu:0"], [100, "/gpu:0"], ] ) def benchmark_attribute_mapper_attribute_count( self, attribute_count, device_placement ): """Benchmark different numbers of attribute_count.""" print("attribute_count {} device {}.".format(attribute_count, device_placement)) with tf.device(device_placement), test_session( allow_soft_placement=True ) as sess: self._benchmark_class_attribute_mapper( sess=sess, class_mapping_count=10, match_class_count=10, match_attribute_count=20, example_count=32, frame_count=3, polygon_count=5, attribute_count=attribute_count, ) @parameterized.expand( [ [1, "/cpu:0"], [10, "/cpu:0"], [50, "/cpu:0"], [100, "/cpu:0"], [1, "/gpu:0"], [10, "/gpu:0"], [50, "/gpu:0"], [100, "/gpu:0"], ] ) def benchmark_attribute_mapper_class_mapping_count( self, class_mapping_count, device_placement ): """Benchmark different numbers of class_mapping_count.""" print( "class_mapping_count {} device {}.".format( class_mapping_count, device_placement ) ) with tf.device(device_placement), test_session( allow_soft_placement=True ) as sess: self._benchmark_class_attribute_mapper( sess=sess, class_mapping_count=class_mapping_count, match_class_count=10, match_attribute_count=20, example_count=32, frame_count=3, polygon_count=5, attribute_count=3, ) @parameterized.expand( [ [1, "/cpu:0"], [10, "/cpu:0"], [50, "/cpu:0"], [100, "/cpu:0"], [1, "/gpu:0"], [10, "/gpu:0"], [50, "/gpu:0"], [100, "/gpu:0"], ] ) def benchmark_attribute_mapper_match_class_count( self, match_class_count, device_placement ): """Benchmark different numbers of match_class_count.""" print( "match_class_count {} device {}.".format( match_class_count, device_placement ) ) with tf.device(device_placement), test_session( allow_soft_placement=True ) as sess: self._benchmark_class_attribute_mapper( sess=sess, class_mapping_count=10, match_class_count=match_class_count, match_attribute_count=20, example_count=32, frame_count=3, polygon_count=5, attribute_count=3, ) @parameterized.expand( [ [1, "/cpu:0"], [10, "/cpu:0"], [50, "/cpu:0"], [100, "/cpu:0"], [1, 
"/gpu:0"], [10, "/gpu:0"], [50, "/gpu:0"], [100, "/gpu:0"], ] ) def benchmark_attribute_mapper_match_attribute_count( self, match_attribute_count, device_placement ): """Benchmark different numbers of match_attribute_count.""" print( "match_attribute_count {} device {}.".format( match_attribute_count, device_placement ) ) with tf.device(device_placement), test_session( allow_soft_placement=True ) as sess: self._benchmark_class_attribute_mapper( sess=sess, class_mapping_count=10, match_class_count=10, match_attribute_count=match_attribute_count, example_count=32, frame_count=3, polygon_count=5, attribute_count=3, ) if __name__ == "__main__": tf.test.main()
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/class_attribute_mapper_benchmark.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mapper for converting 'rider' into the refined subclasses for subclassification.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import modulus.dataloader.humanloop_sqlite_dataset as hl_sql class RiderRefineMapper(hl_sql.FeatureProcessor): """Mapper for converting 'rider' to the refined subclasses.""" CONCERNED_ATTRIBUTES = {"vehicle", "bicycle", "motorcycle"} UNKNOWN_RIDER_TYPE = "unknown_rider" RIDER_ATTRIBUTES_MAPPING = { "vehicle": "vehicle_rider", "bicycle": "bicycle_rider", "motorcycle": "motorcycle_rider", } def add_fields(self, example): """No fields should be added.""" pass def filter(self, example_col_idx, dtype, row): """No filtering.""" return True def map(self, example_col_idx, dtype, row): """Convert 'rider' to the refined subclasses.""" label_idx = example_col_idx.labels if dtype == "BOX": # Only keep the concerned attributes for rider refine mapper. attrs = set(row[label_idx["BOX"]["attributes"]]) & self.CONCERNED_ATTRIBUTES classifier = row[label_idx["BOX"]["classifier"]] if classifier == "rider": if len(attrs) == 1: # Refined rider should only have one unique concerned attribute type. rider_attr = attrs.pop() new_classifier = self.RIDER_ATTRIBUTES_MAPPING[rider_attr] else: # Assign rider as UNKNOWN_RIDER_TYPE if there are no concerned attribute type # or multiple conflicted attributes. new_classifier = self.UNKNOWN_RIDER_TYPE row[label_idx["BOX"]["classifier"]] = new_classifier return row
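# ---------------------------------------------------------------------------
# Illustrative sketch of the mapping logic (a minimal sketch under stated
# assumptions: the column layout below is hypothetical, since in the real
# pipeline `example_col_idx` comes from the humanloop sqlite dataset, and
# FeatureProcessor is assumed to be constructible without arguments).
if __name__ == "__main__":
    import collections

    ExampleColIdx = collections.namedtuple("ExampleColIdx", ["labels"])
    # Hypothetical layout: column 0 holds the attributes, column 1 the classifier.
    col_idx = ExampleColIdx(labels={"BOX": {"attributes": 0, "classifier": 1}})

    mapper = RiderRefineMapper()
    # A rider with exactly one concerned attribute is refined.
    print(mapper.map(col_idx, "BOX", [["bicycle"], "rider"]))
    # -> [['bicycle'], 'bicycle_rider']
    # Conflicting (or missing) concerned attributes map to the unknown type.
    print(mapper.map(col_idx, "BOX", [["bicycle", "motorcycle"], "rider"]))
    # -> [['bicycle', 'motorcycle'], 'unknown_rider']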
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/rider_refine_mapper.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for class_attribute_lookup_table.py""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from parameterized import parameterized import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.class_attribute_lookup_table import ( # noqa ClassAttributeLookupTable, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel from nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures import ( make_tags, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object def generate_polygon_2d_label(class_names, attribute_names): """ Generate a polygon_2d_label with class_names and attribute_names. Args: class_names (nested lists of strings): Class names of polygons in the polygon_2d_label. attribute_names (nested lists of strings): Attribute names of polygons in the polygon_2d_label. Return: (polygon_2d_label): A polygon_2d_label. """ return Polygon2DLabel( # Empty vertices vertices=tf.SparseTensor( indices=tf.reshape(tf.constant([], tf.int64), [0, 5]), values=tf.constant([], tf.int64), dense_shape=tf.constant([0, 0, 0, 0, 2], tf.int64), ), classes=make_tags(class_names), attributes=make_tags(attribute_names), ) class TestClassAttributeLookupTable(tf.test.TestCase): @tf.contrib.eager.run_test_in_graph_and_eager_modes def test_class_attribute_lookup(self): lookup_tables = { "attribute_mapping": { "ak1": 1, "ak2": 2, "ak3": 3, "ak4": 4, "ak5": 5, "ak6": 6, }, "default_attribute_value": -1, "class_mapping": { "ck1": -1, "ck2": -2, "ck3": -3, "ck4": -4, "ck5": -5, "ck6": -6, }, "default_class_value": 0, } class_attribute_lookup_table = ClassAttributeLookupTable(**lookup_tables) # Single example, single frame and two polygons in this frame. The first polygon # has one class and two attributes. The second polygon has one class and one attribute. 
polygon_2d_label = generate_polygon_2d_label( attribute_names=[[[["aK1", "ak3"], ["ak4"]]]], class_names=[[[["ck1"], [" ck2"]]]], ) mapped_polygon_2d_label = class_attribute_lookup_table(polygon_2d_label) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.tables_initializer()) self.assertAllEqual([-1, -2], mapped_polygon_2d_label.classes.values) self.assertAllEqual([1, 3, 4], mapped_polygon_2d_label.attributes.values) @tf.contrib.eager.run_test_in_graph_and_eager_modes def test_class_attribute_lookup_not_in_table(self): lookup_tables = { "attribute_mapping": { "AK1": 1, "ak2": 2, "ak3": 3, "ak4": 4, "ak5": 5, "ak6": 6, }, "default_attribute_value": -1, "class_mapping": { "ck1": -1, "ck2": -2, "ck3": -3, "ck4": -4, "ck5": -5, "ck6": -6, }, "default_class_value": 0, } class_attribute_lookup_table = ClassAttributeLookupTable(**lookup_tables) polygon_2d_label = generate_polygon_2d_label( attribute_names=[[[["ak1", " ak3"], ["ak8"]]]], class_names=[[[["ck1"], ["ck0 "]]]], ) mapped_polygon_2d_label = class_attribute_lookup_table(polygon_2d_label) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.tables_initializer()) self.assertAllEqual([-1, 0], mapped_polygon_2d_label.classes.values) self.assertAllEqual([1, 3, -1], mapped_polygon_2d_label.attributes.values) @tf.contrib.eager.run_test_in_graph_and_eager_modes def test_class_attribute_lookup_many_to_one(self): lookup_tables = { "attribute_mapping": { "ak1": 1, "ak2": 1, "Ak3": 1, "ak4": 4, "ak5": 5, "ak6": 6, }, "default_attribute_value": -1, "class_mapping": { "ck1": -1, "ck2": -2, "ck3": -3, "ck4": -4, "ck5": -5, "ck6": -6, }, "default_class_value": 0, } class_attribute_lookup_table = ClassAttributeLookupTable(**lookup_tables) polygon_2d_label = generate_polygon_2d_label( attribute_names=[[[["ak1", "ak2"], ["ak3"]]]], class_names=[[[["CK1"], ["ck2"]]]], ) mapped_polygon_2d_label = class_attribute_lookup_table(polygon_2d_label) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.tables_initializer()) self.assertAllEqual([-1, -2], mapped_polygon_2d_label.classes.values) self.assertAllEqual([1, 1, 1], mapped_polygon_2d_label.attributes.values) @tf.contrib.eager.run_test_in_graph_and_eager_modes def test_class_attribute_trimming(self): lookup_tables = { "attribute_mapping": {"ak1": 1, "ak2": 2}, "default_attribute_value": 0, "class_mapping": {"ck1": 1, "ck2 ": 2, "ck3": 3, "ck3 ": 3}, "default_class_value": 0, } class_attribute_lookup_table = ClassAttributeLookupTable(**lookup_tables) polygon_2d_label = generate_polygon_2d_label( attribute_names=[[[[" ak1"], [" ak2 "]]]], class_names=[[[[" ck3 "], ["ck1 "]]]], ) mapped_polygon_2d_label = class_attribute_lookup_table(polygon_2d_label) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.tables_initializer()) self.assertAllEqual([1, 2], mapped_polygon_2d_label.attributes.values) self.assertAllEqual([3, 1], mapped_polygon_2d_label.classes.values) @tf.contrib.eager.run_test_in_graph_and_eager_modes def test_class_attribute_trimming_error(self): lookup_tables = { "attribute_mapping": {}, "default_attribute_value": -1, "class_mapping": {"ck1": 1, "ck1 ": 2}, "default_class_value": 0, } self.assertRaises(ValueError, ClassAttributeLookupTable, **lookup_tables) @tf.contrib.eager.run_test_in_graph_and_eager_modes def test_class_attribute_lower_error(self): lookup_tables = { "attribute_mapping": {}, "default_attribute_value": -1, "class_mapping": {"CK1": 1, "ck1": 2}, "default_class_value": 0, } self.assertRaises(ValueError, ClassAttributeLookupTable, **lookup_tables) 
@tf.contrib.eager.run_test_in_graph_and_eager_modes def test_class_attribute_lookup_empty_table(self): lookup_tables = { "attribute_mapping": {}, "default_attribute_value": -1, "class_mapping": {}, "default_class_value": 0, } class_attribute_lookup_table = ClassAttributeLookupTable(**lookup_tables) polygon_2d_label = generate_polygon_2d_label( attribute_names=[[[["ak1", "ak3"], ["ak4"]]]], class_names=[[[["ck1"], ["ck2"]]]], ) mapped_polygon_2d_label = class_attribute_lookup_table(polygon_2d_label) if not tf.executing_eagerly(): self.evaluate(tf.compat.v1.tables_initializer()) self.assertAllEqual([0, 0], mapped_polygon_2d_label.classes.values) self.assertAllEqual([-1, -1, -1], mapped_polygon_2d_label.attributes.values) @tf.contrib.eager.run_test_in_graph_and_eager_modes def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" lookup_tables = { "attribute_mapping": {"ak1": 1, "ak2": 2}, "default_attribute_value": 0, "class_mapping": {"ck1": 1, "ck2 ": 2, "ck3": 3, "ck3 ": 3}, "default_class_value": 0, } lookup_table = ClassAttributeLookupTable(**lookup_tables) lookup_table_dict = lookup_table.serialize() deserialized_lookup_table_dict = deserialize_tao_object(lookup_table_dict) self.assertAllEqual( lookup_table.class_keys, deserialized_lookup_table_dict.class_keys ) self.assertAllEqual( lookup_table.class_values, deserialized_lookup_table_dict.class_values ) self.assertAllEqual( lookup_table.default_class_value, deserialized_lookup_table_dict.default_class_value, ) self.assertAllEqual( lookup_table.attribute_keys, deserialized_lookup_table_dict.attribute_keys ) self.assertAllEqual( lookup_table.attribute_values, deserialized_lookup_table_dict.attribute_values, ) @tf.contrib.eager.run_test_in_graph_and_eager_modes @parameterized.expand( [[{"ak1": 0}, ["ak1"], [0]], [None, ["ak1"], None], [None, ["ak1", "ak2"], [0]]] ) def test_validate_key_value_lists_mapping( self, attribute_mapping, attribute_keys, attribute_values ): lookup_tables = { "attribute_mapping": attribute_mapping, "attribute_keys": attribute_keys, "attribute_values": attribute_values, "default_attribute_value": -1, "class_keys": ["ck1"], "class_values": [1], "default_class_value": 0, } self.assertRaises(ValueError, ClassAttributeLookupTable, **lookup_tables)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/class_attribute_lookup_table_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for TemporalBatcher.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from parameterized import parameterized import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader import types from nvidia_tao_tf1.blocks.multi_source_loader.processors.temporal_batcher import ( TemporalBatcher, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import test_fixtures from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import ( SequenceExample, ) class TemporalBatcherTest(tf.test.TestCase): def test_raises_with_invalid_size(self): with self.assertRaises(ValueError): TemporalBatcher(size=0) def test_raises_when_session_information_not_present(self): batcher = TemporalBatcher(size=2) example = SequenceExample(instances={"dummy": tf.constant(0)}, labels={}) dataset = tf.data.Dataset.from_tensors(example) with self.assertRaises(ValueError): dataset.apply(batcher) @parameterized.expand([[1], [2], [3], [4]]) def test_window_size(self, size): example = test_fixtures.make_example_3d(16, 32) dataset = tf.data.Dataset.from_tensors(example) dataset = dataset.repeat() dataset = dataset.apply(TemporalBatcher(size=size)) iterator = tf.compat.v1.data.make_one_shot_iterator(dataset) batch = iterator.get_next() # test for static shape inference self.assertEqual( [size, 3, 16, 32], batch.instances[types.FEATURE_CAMERA].images.shape.as_list(), ) with self.cached_session() as session: batched = session.run(batch) assert (size, 3, 16, 32) == batched.instances[ types.FEATURE_CAMERA ].images.shape
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/temporal_batcher_test.py
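# --- Illustrative sketch (not part of the repository) ---
# TemporalBatcher groups `size` consecutive examples into one temporal batch, as
# the shape assertions in the test above show. The sketch below is only a rough
# plain-tf.data analogue of that windowing behavior (it ignores the session
# checks and nested example structures the real processor handles).
import tensorflow as tf

dataset = tf.data.Dataset.range(10)
windowed = dataset.batch(3, drop_remainder=True)  # adds a leading temporal dim.
iterator = tf.compat.v1.data.make_one_shot_iterator(windowed)
batch = iterator.get_next()
with tf.compat.v1.Session() as sess:
    print(sess.run(batch))  # [0 1 2]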
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for PolylineToPolygon processor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from parameterized import parameterized
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.polyline_to_polygon import (
    PolylineToPolygon,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import (
    ProcessorTestCase,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
    Coordinates2D,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.polygon2d_label import (
    Polygon2DLabel,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sparse_tensor_builder import (
    SparseTensorBuilder,
)
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object


###
# Convenience shorthands for building out the coordinate tensors
###
class C(SparseTensorBuilder):
    """Coordinates."""

    pass


class Poly(SparseTensorBuilder):
    """Polygon/Polyline."""

    pass


class Frame(SparseTensorBuilder):
    """Frame."""

    pass


class Timestep(SparseTensorBuilder):
    """Timestep."""

    pass


class Batch(SparseTensorBuilder):
    """Batch."""

    pass


class Label(SparseTensorBuilder):
    """Class Label."""

    pass


###


def _get_label(label_builder, label_classes_builder):
    sparse_example = label_builder.build(val_type=tf.float32)
    sparse_classes = label_classes_builder.build(val_type=tf.int32)
    coordinates = Coordinates2D(coordinates=sparse_example, canvas_shape=tf.zeros(1))
    label = Polygon2DLabel(
        vertices=coordinates, classes=sparse_classes, attributes=tf.zeros(1)
    )
    return label


class TestPolylineToPolygon(ProcessorTestCase):
    @parameterized.expand([[tuple()], [(5,)], [(12, 2)]])
    def test_no_examples(self, batch_args):
        empty_polygon2d = self.make_empty_polygon2d_labels(*batch_args)
        processor = PolylineToPolygon(2, line_width=2.0, debug=True)

        with self.session() as sess:
            processed_input, processed_output = sess.run(
                [empty_polygon2d, processor.process(empty_polygon2d)]
            )
            self.assertSparseEqual(
                processed_input.vertices.coordinates,
                processed_output.vertices.coordinates,
            )
            self.assertSparseEqual(processed_input.classes, processed_output.classes)

    def test_no_convert_single_example(self):
        example = Frame(
            Poly(C(0, 0), C(1, 0), C(1, 1), C(0, 1)), Poly(C(0, 2), C(1, 1), C(0, 0))
        )
        example_classes = Frame(Label(0), Label(5))
        self._test_no_convert(example, example_classes)

    def test_no_convert_batch(self):
        example = Batch(
            Frame(Poly(C(0, 0), C(2, 2), C(2, 0))),
            Frame(
                Poly(C(5, 5), C(6, 5), C(6, 6), C(5, 6)),
                Poly(C(1, 5), C(5, 5), C(6, 5)),
            ),
        )
        example_classes = Batch(Frame(Label(0)), Frame(Label(1), Label(3)))
        self._test_no_convert(example, example_classes)

    def test_convert_simple(self):
        example = Frame(Poly(C(0, 0), C(1, 0), C(1, 1)))
        example_classes = Frame(Label(2))

        # Gets converted
        target_example = Frame(
            Poly(C(0, -1), C(1, -1), C(2, 0), C(1, 1), C(0, 1), C(-1, 0)),
            Poly(C(2, 0), C(2, 1), C(1, 2), C(0, 1), C(0, 0), C(1, -1)),
        )
        target_example_classes = Frame(
            Label(2),
            # The polyline has 2 segments, so 2 polygons will be created
            Label(2),
        )
        self._test_convert(
            example, example_classes, target_example, target_example_classes
        )

    def test_convert_empty_first(self):
        example = Batch(Timestep(Frame(), Frame(Poly(C(0, 0), C(1, 0)))))
        example_classes = Batch(Timestep(Frame(), Frame(Label(2))))

        # Gets converted
        target_example = Batch(
            Timestep(
                Frame(),
                Frame(Poly(C(0, -1), C(1, -1), C(2, 0), C(1, 1), C(0, 1), C(-1, 0))),
            )
        )
        target_example_classes = example_classes
        self._test_convert(
            example, example_classes, target_example, target_example_classes
        )

    def test_convert_piece(self):
        example = Frame(
            Poly(C(1, 1), C(3, 1), C(2, 2)),
            Poly(C(0, 0), C(0, 1)),
            Poly(C(5, 5), C(6, 5), C(6, 6), C(5, 6)),
        )
        example_classes = Frame(Label(0), Label(2), Label(1))

        # Gets converted
        target_example = Frame(
            Poly(C(1, 1), C(3, 1), C(2, 2)),
            Poly(C(1, 0), C(1, 1), C(0, 2), C(-1, 1), C(-1, 0), C(0, -1)),
            Poly(C(5, 5), C(6, 5), C(6, 6), C(5, 6)),
        )
        target_example_classes = example_classes
        self._test_convert(
            example, example_classes, target_example, target_example_classes
        )

    def _test_no_convert(self, example, example_classes):
        self._test_convert(example, example_classes, example, example_classes)

    def _test_convert(
        self, example, example_classes, target_example, target_example_classes
    ):
        input_labels = _get_label(example, example_classes)
        target_labels = _get_label(target_example, target_example_classes)

        processor = PolylineToPolygon(2, line_width=2.0, debug=True)

        with self.session() as sess:
            processed_converted, processed_target = sess.run(
                [processor.process(input_labels), target_labels]
            )
            self.assertSparseEqual(
                processed_target.vertices.coordinates,
                processed_converted.vertices.coordinates,
            )
            self.assertSparseEqual(
                processed_target.classes, processed_converted.classes
            )

    def test_serialization_and_deserialization(self):
        """Test that it is a TAOObject that can be serialized and deserialized."""
        processor = PolylineToPolygon(class_id=2, line_width=2.0, debug=True)
        processor_dict = processor.serialize()
        deserialized_dict = deserialize_tao_object(processor_dict)
        assert processor.class_id == deserialized_dict.class_id
        assert processor.line_width == deserialized_dict.line_width
        assert processor.debug == deserialized_dict.debug
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/polyline_to_polygon_test.py
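# --- Illustrative sketch (not part of the repository) ---
# The geometric idea assumed behind PolylineToPolygon, judging from the expected
# vertices in the tests above: each polyline segment is widened into a polygon
# by offsetting it perpendicular to its direction by half the line width.
# segment_to_polygon below is a hypothetical helper, not the processor's actual
# implementation.
import numpy as np


def segment_to_polygon(p0, p1, line_width):
    """Return a 4-vertex polygon surrounding the segment p0 -> p1."""
    p0, p1 = np.asarray(p0, dtype=float), np.asarray(p1, dtype=float)
    direction = (p1 - p0) / np.linalg.norm(p1 - p0)
    # Unit normal scaled to half the line width.
    normal = np.array([-direction[1], direction[0]]) * (line_width / 2.0)
    return np.stack([p0 + normal, p1 + normal, p1 - normal, p0 - normal])


print(segment_to_polygon((0, 0), (1, 0), 2.0))
# [[ 0.  1.] [ 1.  1.] [ 1. -1.] [ 0. -1.]]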
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for converting between data formats. Used internally by pipelines."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.data_format import (
    CHANNELS_FIRST,
    CHANNELS_LAST,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import (
    Processor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Example,
    FEATURE_CAMERA,
)
from nvidia_tao_tf1.core.coreobject import save_args


class ConvertDataFormat(Processor):
    """Processor for converting between data formats. Used internally by pipelines."""

    NCHW_TO_NHWC = (0, 2, 3, 1)
    NHWC_TO_NCHW = (0, 3, 1, 2)

    @save_args
    def __init__(self, input_format, output_format):
        """Construct a converter.

        Args:
            input_format (DataFormat): Input data format.
            output_format (DataFormat): Output data format that inputs will be
                converted to.
        """
        super(ConvertDataFormat, self).__init__()
        self._input_format = input_format
        self._output_format = output_format

    def can_compose(self, other):
        """
        Determine whether two processors can be composed into a single one.

        Args:
            other (Processor): Other processor instance.

        Returns:
            (Boolean): False - composition not supported.
        """
        return False

    def compose(self, other):
        """Compose two processors into a single one.

        Args:
            other (Processor): Other processor instance.

        Raises:
            RuntimeError: Always raises exception because composition is not
                supported.
        """
        raise RuntimeError(
            "Compose called on ConvertDataFormat which is not composable"
        )

    def supported_formats(self):
        """Return supported data formats."""
        return [CHANNELS_FIRST, CHANNELS_LAST]

    def process(self, example):
        """
        Convert input data format to match the data_format of this processor.

        Args:
            example (Example): Example with frames in source data_format.

        Returns:
            (Example): Examples with frames converted to target format.
        """
        if self._input_format == self._output_format:
            return example

        frames = example.instances[FEATURE_CAMERA]
        if (
            self._input_format == CHANNELS_LAST
            and self._output_format == CHANNELS_FIRST
        ):
            return Example(
                instances={
                    FEATURE_CAMERA: tf.transpose(a=frames, perm=self.NHWC_TO_NCHW)
                },
                labels=example.labels,
            )
        if (
            self._input_format == CHANNELS_FIRST
            and self._output_format == CHANNELS_LAST
        ):
            return Example(
                instances={
                    FEATURE_CAMERA: tf.transpose(a=frames, perm=self.NCHW_TO_NHWC)
                },
                labels=example.labels,
            )
        raise RuntimeError(
            "Unhandled conversion - from: {} to {}".format(
                self._input_format, self._output_format
            )
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/convert_data_format.py
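# --- Usage sketch (not part of the repository; shapes are made up) ---
# The converter above is a pure permutation of axes; the same effect can be seen
# directly with tf.transpose and the NHWC_TO_NCHW permutation.
import numpy as np
import tensorflow as tf

nhwc = tf.constant(np.zeros((2, 64, 128, 3), dtype=np.float32))  # N, H, W, C
nchw = tf.transpose(a=nhwc, perm=(0, 3, 1, 2))  # N, C, H, W
print(nchw.shape)  # (2, 3, 64, 128)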
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for Scale processor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re

import numpy as np
from parameterized import parameterized
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import (
    ProcessorTestCase,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.scale import Scale
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Example,
    FEATURE_CAMERA,
    LABEL_MAP,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import Images2D
from nvidia_tao_tf1.blocks.multi_source_loader.types import Images2DReference
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    test_fixtures as fixtures,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import (
    SequenceExample,
    TransformedExample,
)
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object


class TestScale(ProcessorTestCase):
    @parameterized.expand(
        [
            [-10, 20, re.escape("Scale.height (-10) is not positive.")],
            [10, -20, re.escape("Scale.width (-20) is not positive.")],
        ]
    )
    def test_raises_on_invalid_bounds(self, height, width, message):
        with self.assertRaisesRegexp(ValueError, message):
            Scale(height=height, width=width)

    @parameterized.expand([[1, 1], [1, 2], [2, 1]])
    def test_valid_bounds_do_not_raise(self, height, width):
        Scale(height=height, width=width)

    def test_scales_down(self):
        frames = tf.ones((1, 128, 240, 3))
        labels = self.make_polygon_label([[0.5, 0.0], [1.0, 1.0], [0.0, 1.0]])
        example = Example(
            instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: labels}
        )

        expected_frames = tf.ones((1, 64, 120, 3))
        expected_labels = self.make_polygon_label([[0.25, 0.0], [0.5, 0.5], [0.0, 0.5]])
        with self.test_session():
            scale = Scale(height=64, width=120)
            scaled = scale.process(example)
            self.assertAllClose(
                expected_frames.eval(), scaled.instances[FEATURE_CAMERA].eval()
            )
            self.assert_labels_close(expected_labels, scaled.labels[LABEL_MAP])

    @parameterized.expand(
        [
            [Scale(height=96, width=180), Scale(height=32, width=60)],
            [Scale(height=97, width=181), Scale(height=31, width=61)],
        ]
    )
    def test_composite_of_two_processors_outputs_same(self, first, second):
        frames = tf.ones((1, 128, 240, 3))
        labels = self.make_polygon_label([[0.5, 0.0], [1.0, 1.0], [0.0, 1.0]])
        example = Example(
            instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: labels}
        )
        composite = first.compose(second)
        with self.test_session():
            first_processed = first.process(example)
            second_processed = second.process(first_processed)
            composite_processed = composite.process(example)
            self.assertEqual(
                second_processed.instances[FEATURE_CAMERA].get_shape(),
                composite_processed.instances[FEATURE_CAMERA].get_shape(),
            )
            self.assertAllClose(
                second_processed.instances[FEATURE_CAMERA].eval(),
                composite_processed.instances[FEATURE_CAMERA].eval(),
            )
            self.assert_labels_close(
                second_processed.labels[LABEL_MAP],
                composite_processed.labels[LABEL_MAP],
            )

    @parameterized.expand([[False], [True]])
    def test_unbatched_scale(self, use_images2d_reference):
        original_width = 80
        original_height = 64
        example = fixtures.make_example(
            width=original_width,
            height=original_height,
            use_images2d_reference=use_images2d_reference,
        )
        new_width = original_width * 2
        new_height = original_height * 2
        augmentation = Scale(width=new_width, height=new_height)
        processed = augmentation.process(example)
        assert type(processed) == TransformedExample
        processed = tf.compat.v1.Session().run(processed)

        expected_canvas_width = (1, 1, new_width)
        expected_canvas_height = (1, 1, new_height)
        # Check transformation canvas shape.
        assert (
            processed.transformation.canvas_shape.width.shape == expected_canvas_width
        )
        assert (
            processed.transformation.canvas_shape.height.shape
            == expected_canvas_height
        )
        # Check color transformation matrix.
        expected_color_matrix = np.array(
            [
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ]
        )
        np.testing.assert_equal(
            processed.transformation.color_transform_matrix, expected_color_matrix
        )
        # Check spatial transformation matrix. Note that the transformation happens
        # from output to input, which is why the diagonal entries are 0.5 instead
        # of 2.0.
        expected_spatial_matrix = np.array(
            [[[0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 1.0]]]
        )
        np.testing.assert_equal(
            processed.transformation.spatial_transform_matrix, expected_spatial_matrix
        )

    @parameterized.expand([[False, False], [True, False], [False, True], [True, True]])
    def test_batched_scale(self, dynamic_batch_size, use_images2d_reference):
        feed_dict = {}
        expected_batch_size = 3
        if dynamic_batch_size:
            batch_size = tf.compat.v1.placeholder(dtype=tf.int32)
            feed_dict = {batch_size: expected_batch_size}
        else:
            batch_size = expected_batch_size

        shapes_per_frame = []
        coordinate_values = []
        for i in range(expected_batch_size):
            shapes_per_frame.append([1])
            coordinate_values.append([float(i), 0.0])
            coordinate_values.append([float(i) + 10.0, 5.0])
            coordinate_values.append([float(i) + 8.0, 9.0])

        original_width = 80
        original_height = 64
        new_width = original_width * 2
        new_height = original_height * 2

        expected_shape = (1, 3, new_height, new_width)
        expected_canvas_width = (1, new_width)
        expected_canvas_height = (1, new_height)
        if batch_size is not None:
            expected_shape = (expected_batch_size,) + expected_shape
            expected_canvas_height = (expected_batch_size,) + expected_canvas_height
            expected_canvas_width = (expected_batch_size,) + expected_canvas_width

        example = fixtures.make_example(
            width=original_width,
            height=original_height,
            example_count=batch_size,
            shapes_per_frame=shapes_per_frame,
            coordinates_per_polygon=3,
            coordinate_values=coordinate_values,
            use_images2d_reference=use_images2d_reference,
        )
        original = tf.compat.v1.Session().run(example, feed_dict=feed_dict)
        augmentation = Scale(width=new_width, height=new_height)

        # Compute transformations.
        processed = augmentation.process(example)
        assert type(processed) == TransformedExample

        # Peek transformation.
        transformation = tf.compat.v1.Session().run(
            processed.transformation, feed_dict=feed_dict
        )
        # Check transformation canvas shape.
        assert transformation.canvas_shape.width.shape == expected_canvas_width
        assert transformation.canvas_shape.height.shape == expected_canvas_height
        # Check color transformation matrix.
        expected_color_matrix = np.array(
            [
                [
                    [1.0, 0.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0, 0.0],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ]
        )
        expected_color_matrix = np.tile(
            expected_color_matrix, [expected_batch_size, 1, 1]
        )
        np.testing.assert_equal(
            transformation.color_transform_matrix, expected_color_matrix
        )
        # Check spatial transformation matrix. Note that the transformation happens
        # from output to input, which is why the diagonal entries are 0.5 instead
        # of 2.0.
        expected_spatial_matrix = np.array(
            [[[0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 1.0]]]
        )
        expected_spatial_matrix = np.tile(
            expected_spatial_matrix, [expected_batch_size, 1, 1]
        )
        np.testing.assert_equal(
            transformation.spatial_transform_matrix, expected_spatial_matrix
        )

        # Apply transformations to examples.
        processed = processed()
        assert type(processed) == SequenceExample
        processed = tf.compat.v1.Session().run(processed, feed_dict=feed_dict)

        feature_camera = processed.instances[FEATURE_CAMERA]
        if type(feature_camera) == Images2D:
            # Check image shape.
            assert feature_camera.images.shape == expected_shape
            assert feature_camera.canvas_shape.width.shape == expected_canvas_width
            assert feature_camera.canvas_shape.height.shape == expected_canvas_height
        else:
            # Images2DReference hasn't been processed yet.
            assert type(feature_camera) == Images2DReference
            assert feature_camera.input_width == original_width
            assert feature_camera.input_height == original_height

        # Check vertices canvas shape.
        vertices_canvas_shape = processed.labels[LABEL_MAP].vertices.canvas_shape
        assert vertices_canvas_shape.width.shape == expected_canvas_width
        assert vertices_canvas_shape.height.shape == expected_canvas_height

        # Check that processed coords are 2x the original ones.
        original_coordinates = original.labels[LABEL_MAP].vertices.coordinates
        processed_coordinates = processed.labels[LABEL_MAP].vertices.coordinates
        np.testing.assert_equal(
            original_coordinates.values * 2.0, processed_coordinates.values
        )
        np.testing.assert_equal(
            original_coordinates.indices, processed_coordinates.indices
        )
        np.testing.assert_equal(
            original_coordinates.dense_shape, processed_coordinates.dense_shape
        )

    def test_serialization_and_deserialization(self):
        """Test that it is a TAOObject that can be serialized and deserialized."""
        augmentation = Scale(width=10, height=12)
        augmentation_dict = augmentation.serialize()
        deserialized_augmentation = deserialize_tao_object(augmentation_dict)
        self.assertEqual(
            str(augmentation._transform), str(deserialized_augmentation._transform)
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/scale_test.py
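# --- Illustrative sketch (not part of the repository) ---
# As the comments in the test above note, spatial transform matrices map output
# pixel coordinates back to input coordinates, so a 2x upscale uses diagonal
# entries of 0.5. scaling_stm below is a hypothetical helper for building such
# a matrix by hand.
import numpy as np


def scaling_stm(scale_x, scale_y):
    """Output-to-input scaling matrix in homogeneous coordinates."""
    return np.array(
        [
            [1.0 / scale_x, 0.0, 0.0],
            [0.0, 1.0 / scale_y, 0.0],
            [0.0, 0.0, 1.0],
        ]
    )


print(scaling_stm(2.0, 2.0))  # diagonal 0.5, matching the expected matrix above.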
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Main test for polyline_clipper.py."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import pytest
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.polyline_clipper import (
    PolylineClipper,
)


@pytest.fixture(scope="session")
def _clipper():
    return PolylineClipper(5)


# Model input width and height is 960 x 504.
polylines = np.array(
    [
        [300, 500], [300, 375], [300, 250], [300, 125], [300, 4],
        [660, 4], [660, 125], [660, 250], [660, 375], [660, 500],
        [390, 350], [390, 300], [390, 250], [390, 200], [390, 150],
        [570, 150], [570, 200], [570, 250], [570, 300], [570, 350],
        [950, 400], [715, 400], [480, 400], [245, 400], [10, 400],
        [10, 100], [245, 100], [480, 100], [715, 100], [950, 100],
        [720, 325], [600, 325], [480, 325], [360, 325], [240, 325],
        [240, 175], [360, 175], [480, 175], [600, 175], [720, 175],
    ],
    dtype=float,
)

vertices_per_polyline = np.array([5, 5, 5, 5, 5, 5, 5, 5], dtype=int)
class_ids_per_polyline = np.array([0, 0, 1, 1, 2, 2, 3, 3], dtype=int)
attributes_per_polyline = np.array([-1, 1, -1, 1, -1, 1, -1, 1], dtype=int)

polylines_all_inside = np.array(
    [
        [300, 500], [300, 375], [300, 250], [300, 125], [300, 4],
        [660, 4], [660, 125], [660, 250], [660, 375], [660, 500],
        [390, 350], [390, 300], [390, 250], [390, 200], [390, 150],
        [570, 150], [570, 200], [570, 250], [570, 300], [570, 350],
        [10, 400], [245, 400], [480, 400], [715, 400], [950, 400],
        [950, 100], [715, 100], [480, 100], [245, 100], [10, 100],
        [240, 325], [360, 325], [480, 325], [600, 325], [720, 325],
        [720, 175], [600, 175], [480, 175], [360, 175], [240, 175],
    ],
    dtype=float,
)

polylines_width_test = np.array(
    [
        [30, 500], [30, 375], [30, 250], [30, 125], [30, 4],
        [930, 4], [930, 125], [930, 250], [930, 375], [930, 500],
        [255, 350], [255, 300], [255, 250], [255, 200], [255, 150],
        [705, 150], [705, 200], [705, 250], [705, 300], [705, 350],
        [0, 400], [480, 400], [960, 400],
        [960, 100], [480, 100], [0, 100],
        [0, 325], [180, 325], [480, 325], [780, 325], [960, 325],
        [960, 175], [780, 175], [480, 175], [180, 175], [0, 175],
    ],
    dtype=float,
)

polylines_width_test_maintained_vertices = np.array(
    [
        [30, 500], [30, 375], [30, 250], [30, 125], [30, 4],
        [930, 500], [930, 375], [930, 250], [930, 125], [930, 4],
        [255, 350], [255, 300], [255, 250], [255, 200], [255, 150],
        [705, 350], [705, 300], [705, 250], [705, 200], [705, 150],
        [0, 400], [480, 400], [960, 400], [960, 400], [960, 400],
        [960, 100], [480, 100], [0, 100], [0, 100], [0, 100],
        [0, 325], [180, 325], [480, 325], [780, 325], [960, 325],
        [960, 175], [780, 175], [480, 175], [180, 175], [0, 175],
    ],
    dtype=float,
)

polylines_height_test = np.array(
    [
        [300, 504], [300, 250], [300, 0],
        [660, 0], [660, 250], [660, 504],
        [390, 500], [390, 375], [390, 250], [390, 125], [390, 0],
        [570, 0], [570, 125], [570, 250], [570, 375], [570, 500],
        [240, 437.5], [360, 437.5], [480, 437.5], [600, 437.5], [720, 437.5],
        [720, 62.5], [600, 62.5], [480, 62.5], [360, 62.5], [240, 62.5],
    ],
    dtype=float,
)

polylines_height_test_maintained_vertices = np.array(
    [
        [300, 504], [300, 250], [300, 0], [300, 0], [300, 0],
        [660, 504], [660, 504], [660, 504], [660, 250], [660, 0],
        [390, 500], [390, 375], [390, 250], [390, 125], [390, 0],
        [570, 500], [570, 375], [570, 250], [570, 125], [570, 0],
        [240, 437.5], [360, 437.5], [480, 437.5], [600, 437.5], [720, 437.5],
        [720, 62.5], [600, 62.5], [480, 62.5], [360, 62.5], [240, 62.5],
    ],
    dtype=float,
)

polylines_u_test = np.array(
    [
        [210, 504], [210, 437.5], [210, 250], [210, 62.5], [210, 0],
        [345, 400], [345, 325], [345, 250], [345, 175], [345, 100],
        [615, 100], [615, 175], [615, 250], [615, 325], [615, 400],
        [960, 475], [832.5, 475], [480, 475], [127.5, 475], [0, 475],
        [840, 362.5], [660, 362.5], [480, 362.5], [300, 362.5], [120, 362.5],
        [120, 137.5], [300, 137.5], [480, 137.5], [660, 137.5], [840, 137.5],
    ],
    dtype=float,
)

polylines_u_test_maintained_vertices = np.array(
    [
        [210, 504], [210, 437.5], [210, 250], [210, 62.5], [210, 0],
        [210, 0], [210, 0], [210, 0], [210, 0], [210, 0],
        [345, 400], [345, 325], [345, 250], [345, 175], [345, 100],
        [615, 400], [615, 325], [615, 250], [615, 175], [615, 100],
        [960, 475], [832.5, 475], [480, 475], [127.5, 475], [0, 475],
        [0, 475], [0, 475], [0, 475], [0, 475], [0, 475],
        [840, 362.5], [660, 362.5], [480, 362.5], [300, 362.5], [120, 362.5],
        [120, 137.5], [300, 137.5], [480, 137.5], [660, 137.5], [840, 137.5],
    ],
    dtype=float,
)

polylines_reordered = np.array(
    [
        [300, 500], [300, 375], [300, 250], [300, 125], [300, 4],
        [660, 500], [660, 375], [660, 250], [660, 125], [660, 4],
        [390, 350], [390, 300], [390, 250], [390, 200], [390, 150],
        [570, 350], [570, 300], [570, 250], [570, 200], [570, 150],
        [10, 400], [245, 400], [480, 400], [715, 400], [950, 400],
        [950, 100], [715, 100], [480, 100], [245, 100], [10, 100],
        [240, 325], [360, 325], [480, 325], [600, 325], [720, 325],
        [720, 175], [600, 175], [480, 175], [360, 175], [240, 175],
    ],
    dtype=float,
)

# Test possible configurations of the paths and image mask.
clipping_tests = [
    # all inside.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, 0, 0, 1, 1, False,
        polylines_all_inside, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline,
    ),
    # all outside.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, 0, 504, 1, 1, False,
        np.empty((0, 2)), [], [], [],
    ),
    # straddle width.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, -720, 0, 2.5, 1, False,
        polylines_width_test, [5, 5, 5, 5, 3, 3, 5, 5], class_ids_per_polyline,
        attributes_per_polyline,
    ),
    # straddle height.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, 0, -375, 1, 2.5, False,
        polylines_height_test, [3, 3, 5, 5, 5, 5], [0, 0, 1, 1, 3, 3],
        [-1, 1, -1, 1, -1, 1],
    ),
    # increase in number (u-shape).
    (
        polylines, np.array([10, 10, 10, 10], dtype=int),
        np.array([0, 1, 2, 3], dtype=int), np.array([1, 1, 1, 1], dtype=int),
        -240, -125, 1.5, 1.5, False,
        polylines_u_test, [5, 10, 5, 10], [0, 1, 2, 3], [1, 1, 1, 1],
    ),
    # all inside.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, 0, 0, 1, 1, True,
        polylines_reordered, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline,
    ),
    # all outside.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, 0, 504, 1, 1, True,
        np.empty((0, 2)), [], [], [],
    ),
    # straddle width.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, -720, 0, 2.5, 1, True,
        polylines_width_test_maintained_vertices, [5, 5, 5, 5, 5, 5, 5, 5],
        class_ids_per_polyline, attributes_per_polyline,
    ),
    # straddle height.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline, 0, -375, 1, 2.5, True,
        polylines_height_test_maintained_vertices, [5, 5, 5, 5, 5, 5],
        [0, 0, 1, 1, 3, 3], [-1, 1, -1, 1, -1, 1],
    ),
    # increase in number (u-shape).
    (
        polylines, np.array([10, 10, 10, 10], dtype=int),
        np.array([0, 1, 2, 3], dtype=int), np.array([1, 1, 1, 1], dtype=int),
        -240, -125, 1.5, 1.5, True,
        polylines_u_test_maintained_vertices, [10, 10, 10, 10], [0, 1, 2, 3],
        [1, 1, 1, 1],
    ),
]

# Model input width and height is 960 x 504.
polylines = np.array(
    [
        [300, 500], [300, 375], [300, 250], [300, 125], [300, 4],
        [660, 4], [660, 125], [660, 250], [660, 375], [660, 500],
        [390, 350], [390, 300], [390, 250], [390, 200], [390, 150],
        [570, 150], [570, 200], [570, 250], [570, 300], [570, 350],
        [950, 400], [715, 400], [480, 400], [245, 400], [10, 400],
        [10, 100], [245, 100], [480, 100], [715, 100], [950, 100],
        [720, 325], [600, 325], [480, 325], [360, 325], [240, 325],
        [240, 175], [360, 175], [480, 175], [600, 175], [720, 175],
    ],
    dtype=float,
)

vertices_per_polyline = np.array([5, 5, 5, 5, 5, 5, 5, 5], dtype=int)
class_ids_per_polyline = np.array([0, 0, 1, 1, 2, 2, 3, 3], dtype=int)
attributes_per_polyline = np.array([-1, 1, -1, 1, -1, 1, -1, 1], dtype=int)


@pytest.mark.parametrize(
    "polylines, vertices_per_polyline, class_ids_per_polyline, "
    "attributes_per_polyline, translate_x, translate_y, "
    "scale_x, scale_y, maintain_vertex_number, expected_polylines, "
    "expected_vertices, expected_classes, expected_attributes",
    clipping_tests,
)
def test_polyline_clipping(
    _clipper,
    polylines,
    vertices_per_polyline,
    class_ids_per_polyline,
    attributes_per_polyline,
    translate_x,
    translate_y,
    scale_x,
    scale_y,
    maintain_vertex_number,
    expected_polylines,
    expected_vertices,
    expected_classes,
    expected_attributes,
):
    """Test the polyline clipper."""
    # Adjust coordinates based on inputs.
    polylines_modified = polylines.copy()
    polylines_modified[:, 0] = (polylines[:, 0] * scale_x) + translate_x
    polylines_modified[:, 1] = (polylines[:, 1] * scale_y) + translate_y

    image_height = 504
    image_width = 960
    polygon_mask = tf.constant(
        [
            [0, 0],
            [0, image_height],
            [image_width, image_height],
            [image_width, 0],
            [0, 0],
        ],
        tf.float32,
    )

    sess = tf.compat.v1.Session()
    (
        clipped_polylines,
        _,
        clipped_vertices_per_polyline,
        clipped_class_ids_per_polyline,
        clipped_attributes_per_polyline,
    ) = sess.run(
        _clipper.clip(
            tf.constant(polylines_modified, dtype=tf.float32),
            tf.constant(vertices_per_polyline, dtype=tf.int32),
            tf.constant(class_ids_per_polyline, dtype=tf.int32),
            tf.constant(attributes_per_polyline, dtype=tf.int32),
            maintain_vertex_number=maintain_vertex_number,
            polygon_mask=polygon_mask,
        )
    )

    np.testing.assert_almost_equal(clipped_polylines, expected_polylines, 4)
    np.testing.assert_almost_equal(clipped_vertices_per_polyline, expected_vertices, 4)
    np.testing.assert_almost_equal(clipped_class_ids_per_polyline, expected_classes, 4)
    np.testing.assert_almost_equal(
        clipped_attributes_per_polyline, expected_attributes, 4
    )


unspliting_tests = [
    # no splits.
    (
        polylines,
        vertices_per_polyline,
        np.array([0, 1, 2, 3, 4, 5, 6, 7], dtype=int),
        polylines,
        vertices_per_polyline,
        np.array([0, 1, 2, 3, 4, 5, 6, 7], dtype=int),
    ),
    # two splits.
    (
        polylines,
        vertices_per_polyline,
        np.array([0, 0, 1, 2, 3, 3, 4, 5], dtype=int),
        np.take(
            polylines,
            np.hstack(
                (np.hstack((np.arange(0, 5), np.arange(10, 25))), np.arange(30, 40))
            ),
            axis=0,
        ),
        vertices_per_polyline[0:6],
        np.array([0, 1, 2, 3, 4, 5], dtype=int),
    ),
]


@pytest.mark.parametrize(
    "polylines, vertices_per_polyline, polyline_index_map, "
    "expected_polylines, expected_vertices, expected_indices",
    unspliting_tests,
)
def test_remove_split_polylines(
    _clipper,
    polylines,
    vertices_per_polyline,
    polyline_index_map,
    expected_polylines,
    expected_vertices,
    expected_indices,
):
    (
        clipped_polylines,
        clipped_vertices_per_polyline,
        clipped_polyline_index_map,
    ) = _clipper._remove_split_polylines(
        polylines, vertices_per_polyline, polyline_index_map
    )
    np.testing.assert_almost_equal(clipped_polylines, expected_polylines, 4)
    np.testing.assert_almost_equal(clipped_vertices_per_polyline, expected_vertices, 4)
    np.testing.assert_almost_equal(clipped_polyline_index_map, expected_indices, 4)


polylines_bottom_up = np.array(
    [
        [300, 500], [300, 375], [300, 250], [300, 125], [300, 4],
        [660, 500], [660, 375], [660, 250], [660, 125], [660, 4],
        [390, 350], [390, 300], [390, 250], [390, 200], [390, 150],
        [570, 350], [570, 300], [570, 250], [570, 200], [570, 150],
        [950, 400], [715, 400], [480, 400], [245, 400], [10, 400],
        [10, 100], [245, 100], [480, 100], [715, 100], [950, 100],
        [720, 325], [600, 325], [480, 325], [360, 325], [240, 325],
        [240, 175], [360, 175], [480, 175], [600, 175], [720, 175],
    ],
    dtype=float,
)

reordering_tests = [(polylines, polylines_bottom_up)]


@pytest.mark.parametrize("polylines, expected_polylines", reordering_tests)
def test_enforce_bottom_up_vertex_order(_clipper, polylines, expected_polylines):
    sess = tf.compat.v1.Session()
    bottom_up_ordered = sess.run(_clipper._enforce_bottom_up_vertex_order(polylines))
    np.testing.assert_equal(expected_polylines, bottom_up_ordered)


polylines_shortened = np.array(
    [
        [300, 500], [300, 250], [300, 4],
        [660, 4], [660, 125], [660, 250], [660, 375], [660, 500],
        [390, 350], [390, 300], [390, 250], [390, 200], [390, 150],
        [950, 150], [570, 200],
        [950, 400], [715, 400], [480, 400], [245, 400], [10, 400],
        [10, 100], [245, 100],
        [720, 325], [600, 325], [480, 325], [360, 325], [240, 325],
        [240, 175], [360, 175], [480, 175], [600, 175], [720, 175],
    ],
    dtype=float,
)

polylines_resampled = np.array(
    [
        [300, 500], [300, 250], [300, 4], [300, 4], [300, 4],
        [660, 4], [660, 125], [660, 250], [660, 375], [660, 500],
        [390, 350], [390, 300], [390, 250], [390, 200], [390, 150],
        [950, 150], [570, 200], [570, 200], [570, 200], [570, 200],
        [950, 400], [715, 400], [480, 400], [245, 400], [10, 400],
        [10, 100], [245, 100], [245, 100], [245, 100], [245, 100],
        [720, 325], [600, 325], [480, 325], [360, 325], [240, 325],
        [240, 175], [360, 175], [480, 175], [600, 175], [720, 175],
    ],
    dtype=float,
)

resampling_tests = [
    (
        polylines_shortened,
        np.array([3, 5, 5, 2, 5, 2, 5, 5], dtype=int),
        polylines_resampled,
        vertices_per_polyline,
        np.array([0, 1, 2, 3, 4, 5, 6, 7], dtype=int),
    ),
    (
        polylines,
        vertices_per_polyline,
        polylines,
        vertices_per_polyline,
        np.array([0, 1, 2, 3, 4, 5, 6, 7], dtype=int),
    ),
]


@pytest.mark.parametrize(
    "polylines, vertices, expected_polylines, expected_vertices, index_map",
    resampling_tests,
)
def test_resample_shortened_polylines(
    _clipper, polylines, vertices, expected_polylines, expected_vertices, index_map
):
    resampled_polylines, resampled_vertices = _clipper._resample_shortened_polylines(
        polylines, vertices, expected_vertices, index_map
    )
    np.testing.assert_almost_equal(resampled_polylines, expected_polylines)
    np.testing.assert_almost_equal(resampled_vertices, expected_vertices, 4)


attributes_per_polyline_with_path_attributes = np.array(
    [-1, 1, -1, 1, -1, 1, -1, 1, 2, 2, 3, 3, 2, 2, 3, 3], dtype=int
)

clipping_tests_with_path_attributes = [
    # all inside.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline_with_path_attributes, 0, 0, 1, 1, False,
        polylines_all_inside, [0, 1, 2, 3, 4, 5, 6, 7], vertices_per_polyline,
        class_ids_per_polyline, attributes_per_polyline_with_path_attributes,
    ),
    # all outside.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline_with_path_attributes, 0, 504, 1, 1, False,
        np.empty((0, 2)), [], [], [], [],
    ),
    # straddle width.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline_with_path_attributes, -720, 0, 2.5, 1, False,
        polylines_width_test, [0, 1, 2, 3, 4, 5, 6, 7], [5, 5, 5, 5, 3, 3, 5, 5],
        class_ids_per_polyline, attributes_per_polyline_with_path_attributes,
    ),
    # straddle height.
    (
        polylines, vertices_per_polyline, class_ids_per_polyline,
        attributes_per_polyline_with_path_attributes, 0, -375, 1, 2.5, False,
        polylines_height_test, [0, 1, 2, 3, 6, 7], [3, 3, 5, 5, 5, 5],
        [0, 0, 1, 1, 3, 3], [-1, 1, -1, 1, -1, 1, 2, 2, 3, 3, 3, 3],
    ),
]


@pytest.mark.parametrize(
    "polylines, vertices_per_polyline, class_ids_per_polyline, "
    "attributes_per_polyline_with_path_attributes, translate_x, translate_y, "
    "scale_x, scale_y, maintain_vertex_number, expected_polylines, "
    "expected_index_map, expected_vertices, expected_classes, "
    "expected_attributes",
    clipping_tests_with_path_attributes,
)
def test_polyline_clipping_with_path_attributes(
    _clipper,
    polylines,
    vertices_per_polyline,
    class_ids_per_polyline,
    attributes_per_polyline_with_path_attributes,
    translate_x,
    translate_y,
    scale_x,
    scale_y,
    maintain_vertex_number,
    expected_polylines,
    expected_index_map,
    expected_vertices,
    expected_classes,
    expected_attributes,
):
    """Test the polyline clipper."""
    # Adjust coordinates based on inputs.
    polylines_modified = polylines.copy()
    polylines_modified[:, 0] = (polylines[:, 0] * scale_x) + translate_x
    polylines_modified[:, 1] = (polylines[:, 1] * scale_y) + translate_y

    image_height = 504
    image_width = 960
    polygon_mask = tf.constant(
        [
            [0, 0],
            [0, image_height],
            [image_width, image_height],
            [image_width, 0],
            [0, 0],
        ],
        tf.float32,
    )

    sess = tf.compat.v1.Session()
    (
        clipped_polylines,
        clipped_polyline_index_map,
        clipped_vertices_per_polyline,
        clipped_class_ids_per_polyline,
        clipped_attributes_per_polyline,
    ) = sess.run(
        _clipper.clip(
            tf.constant(polylines_modified, dtype=tf.float32),
            tf.constant(vertices_per_polyline, dtype=tf.int32),
            tf.constant(class_ids_per_polyline, dtype=tf.int32),
            tf.constant(attributes_per_polyline_with_path_attributes, dtype=tf.int32),
            maintain_vertex_number=maintain_vertex_number,
            polygon_mask=polygon_mask,
        )
    )

    np.testing.assert_almost_equal(clipped_polylines, expected_polylines, decimal=4)
    np.testing.assert_almost_equal(
        clipped_polyline_index_map, expected_index_map, decimal=4
    )
    np.testing.assert_almost_equal(
        clipped_vertices_per_polyline, expected_vertices, decimal=4
    )
    np.testing.assert_almost_equal(
        clipped_class_ids_per_polyline, expected_classes, decimal=4
    )
    np.testing.assert_almost_equal(
        clipped_attributes_per_polyline, expected_attributes, decimal=4
    )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/polyline_clipper_test.py
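# --- Illustrative sketch (not part of the repository; shapely is used purely
# for illustration and is not a dependency of PolylineClipper) ---
# Clipping a polyline against the canvas rectangle keeps only the portion that
# lies inside the image, which is what the expected arrays above encode.
from shapely.geometry import LineString, box

canvas = box(0, 0, 960, 504)
polyline = LineString([(-100, 100), (480, 100), (1100, 100)])
clipped = polyline.intersection(canvas)
print(clipped.bounds)  # (0.0, 100.0, 960.0, 100.0): cut at x=0 and x=960.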
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A lookup table for mapping input classes and attributes to output classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel
from nvidia_tao_tf1.core.coreobject import TAOObject, save_args
from nvidia_tao_tf1.core.processors import LookupTable


def _string_normalize(s):
    return s.strip().lower()


def _normalize_reduce(keys, values):
    """Return a tuple of (keys, values) with unique keys after string trimming."""
    reduced = dict()
    for k, v in zip(keys, values):
        strip_key = _string_normalize(k)
        if strip_key not in reduced:
            reduced[strip_key] = v
        elif reduced[strip_key] != v:
            raise ValueError(
                "Error: Duplicate keys after trimming had different values."
            )
    keys = []
    values = []
    for k, v in reduced.items():
        keys.append(k)
        values.append(v)
    return keys, values


def _tf_normalize(x):
    y = np.empty(x.shape, dtype=x.dtype)
    for i, val in enumerate(x):
        y[i] = _string_normalize(val)
    return y


def _normalize_tf_strings(inputs):
    return tf.compat.v1.py_func(_tf_normalize, [inputs], tf.string, stateful=False)


def _validate_key_value_lists(mapping, key_list, value_list):
    if key_list and mapping:
        raise ValueError(
            "Specify the key and values as separate lists, or a dictionary"
            " that maps keys to values, not both."
        )
    if mapping:
        return list(mapping.keys()), list(mapping.values())
    if not key_list and not value_list:
        return [], []
    if not (key_list and value_list):
        raise ValueError("Both lists need to be specified.")
    if len(key_list) != len(value_list):
        raise ValueError("Keys and values must have same length.")
    return key_list, value_list


class ClassAttributeLookupTable(TAOObject):
    """A lookup table for mapping input classes and attributes to output classes."""

    # TODO(mlehr): Delete the key and value lists once all the specs are updated.
    @save_args
    def __init__(
        self,
        default_attribute_value,
        default_class_value,
        attribute_mapping=None,
        class_mapping=None,
        attribute_keys=None,
        attribute_values=None,
        class_keys=None,
        class_values=None,
        **kwargs
    ):
        """
        Construct a ClassAttributeLookupTable class.

        Args:
            default_attribute_value (int): Default value for attribute lookup table.
            default_class_value (int): Default value for class lookup table.
            attribute_mapping (dict): Mapping from attribute names to attribute ids.
            class_mapping (dict): Mapping from class names to class ids.
            attribute_keys (list of strings): Keys of attribute lookup table.
            attribute_values (list of ints): Values of attribute lookup table.
            class_keys (list of strings): Keys of class_id lookup table.
            class_values (list of ints): Values of class_id lookup table.

        Raises:
            ValueError: If multiple ``keys`` after string normalizing become
                duplicates, but their ``values`` are different. The keys for
                attributes, as well as classes, are normalized.
                Example:
                    keys = ["cls1", " cLs1 "]
                    values = [1, 2]
                Current string normalization includes whitespace trimming on both
                ends, as well as case insensitivity.
        """
        super(ClassAttributeLookupTable, self).__init__(**kwargs)
        attribute_keys, attribute_values = _validate_key_value_lists(
            attribute_mapping, attribute_keys, attribute_values
        )
        class_keys, class_values = _validate_key_value_lists(
            class_mapping, class_keys, class_values
        )
        self.class_keys, self.class_values = _normalize_reduce(
            class_keys, class_values
        )
        self.default_class_value = default_class_value
        self.attribute_keys, self.attribute_values = _normalize_reduce(
            attribute_keys, attribute_values
        )
        self.default_attribute_value = default_attribute_value

    def __call__(self, polygon_2d_label):
        """Map text class and attribute names to numeric ids.

        Args:
            polygon_2d_label (Polygon2DLabel): A label containing 2D
                polygons/polylines and their associated classes and attributes. The
                first two dimensions of each tensor that this structure contains
                should be batch/example(B) followed by a frame/time dimension(T).
                The rest of the dimensions encode type specific information. See
                Polygon2DLabel documentation for details.

        Returns:
            (Polygon2DLabel): The label with the classes and attributes mapped to
                numeric ids.
        """
        if self.class_keys:
            class_lookup_table = LookupTable(
                keys=self.class_keys,
                values=self.class_values,
                default_value=self.default_class_value,
            )
        else:
            class_lookup_table = None
        if self.attribute_keys:
            attribute_lookup_table = LookupTable(
                keys=self.attribute_keys,
                values=self.attribute_values,
                default_value=self.default_attribute_value,
            )
        else:
            attribute_lookup_table = None

        classes = polygon_2d_label.classes
        if class_lookup_table is not None:
            trim_values = _normalize_tf_strings(classes.values)
            mapped_class_ids = class_lookup_table(trim_values)
        else:
            mapped_class_ids = (
                tf.ones_like(classes.values, dtype=tf.int32) * self.default_class_value
            )

        attributes = polygon_2d_label.attributes
        if attribute_lookup_table is not None:
            trim_attributes = _normalize_tf_strings(attributes.values)
            mapped_attribute_ids = attribute_lookup_table(trim_attributes)
        else:
            mapped_attribute_ids = (
                tf.ones_like(attributes.values, dtype=tf.int32)
                * self.default_attribute_value
            )

        return Polygon2DLabel(
            vertices=polygon_2d_label.vertices,
            classes=tf.SparseTensor(
                values=mapped_class_ids,
                indices=classes.indices,
                dense_shape=classes.dense_shape,
            ),
            attributes=tf.SparseTensor(
                values=mapped_attribute_ids,
                indices=attributes.indices,
                dense_shape=attributes.dense_shape,
            ),
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/class_attribute_lookup_table.py
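# --- Usage sketch (not part of the repository; assumes the
# ClassAttributeLookupTable class above is in scope, and the mapping values are
# made up) ---
# Keys are trimmed and lowercased before deduplication, so "Car " and " car"
# collapse into a single entry; duplicates are only allowed when they map to
# the same value, otherwise construction raises ValueError.
table = ClassAttributeLookupTable(
    default_attribute_value=-1,
    default_class_value=0,
    class_mapping={"Car ": 1, " car": 1, "truck": 2},
    attribute_mapping={"Occluded": 1},
)
print(sorted(zip(table.class_keys, table.class_values)))  # [('car', 1), ('truck', 2)]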
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor which lossily crops images."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import (
    Processor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Canvas2D,
    FEATURE_CAMERA,
    Images2D,
    LABEL_MAP,
    Polygon2DLabel,
    SequenceExample,
)
from nvidia_tao_tf1.core.coreobject import save_args


class LossyCrop(Processor):
    """Processor that lossily crops images."""

    @save_args
    def __init__(self, left, top, right, bottom):
        """Creates a processor for cropping frames and labels.

        The origin of the coordinate system is at the top-left corner. Coordinates
        keep increasing from left to right and from top to bottom.

                   top
                --------
           left |      |
                |      | right
                --------
                 bottom

        Args:
            left (int): Left edge before which contents will be discarded.
            top (int): Top edge above which contents will be discarded.
            right (int): Right edge after which contents will be discarded.
            bottom (int): Bottom edge after which contents will be discarded.
        """
        super(LossyCrop, self).__init__()
        self._left = left
        self._top = top
        self._right = right
        self._bottom = bottom

    @property
    def supported_formats(self):
        """Data formats supported by this processor.

        Returns:
            data_formats (list of 'DataFormat'): Input data formats that this
                processor supports.
        """
        return [CHANNELS_FIRST]

    def can_compose(self, other):
        """Can't compose in LossyCrop."""
        return False

    def compose(self, other):
        """Does not support composing."""
        raise NotImplementedError("ComposableProcessor.compose not implemented")

    def process(self, example):
        """
        Process examples by cropping their frames and translating label vertices.

        Args:
            example (Example): Examples to process in format specified by
                data_format.

        Returns:
            example (Example): Example with the crop applied.
        """
        if not isinstance(example, SequenceExample):
            raise TypeError("Tried process input of type: {}".format(type(example)))
        instances = example.instances
        labels = example.labels

        canvas_shape = Canvas2D(
            height=tf.ones(self._bottom - self._top),
            width=tf.ones(self._right - self._left),
        )

        if FEATURE_CAMERA in instances:
            images2d = instances[FEATURE_CAMERA]
            images = images2d.images
            new_height = self._bottom - self._top
            new_width = self._right - self._left
            image_dims = len(images.shape)
            if image_dims == 5:
                cropped = tf.slice(
                    images,
                    tf.stack([0, 0, 0, self._top, self._left]),
                    tf.stack([-1, -1, -1, new_height, new_width]),
                )
            elif image_dims == 4:
                cropped = tf.slice(
                    images,
                    tf.stack([0, 0, self._top, self._left]),
                    tf.stack([-1, -1, new_height, new_width]),
                )
            else:
                raise RuntimeError("Unhandled image dims: {}".format(image_dims))
            instances[FEATURE_CAMERA] = Images2D(
                images=cropped, canvas_shape=canvas_shape
            )

        if LABEL_MAP in labels:
            polygon_2d_label = labels[LABEL_MAP]
            coordinates_2d = polygon_2d_label.vertices
            sparse_coordinates = coordinates_2d.coordinates

            def _transform_coordinates(coordinates):
                vertices = tf.reshape(coordinates.values, [-1, 2])
                translated = tf.subtract(
                    vertices, tf.constant([self._left, self._top], dtype=tf.float32)
                )
                translated = tf.reshape(translated, tf.shape(input=coordinates.values))
                return tf.SparseTensor(
                    indices=coordinates.indices,
                    values=translated,
                    dense_shape=coordinates.dense_shape,
                )

            # If the coordinates are empty, do nothing. Transformations on empty
            # sparse tensors were causing problems.
            transformed_coordinates = tf.cond(
                pred=tf.greater(tf.size(input=sparse_coordinates.values), 0),
                true_fn=lambda: _transform_coordinates(sparse_coordinates),
                false_fn=lambda: sparse_coordinates,
            )

            transformed_coordinates_2d = coordinates_2d.replace_coordinates(
                transformed_coordinates, canvas_shape
            )
            translated_polygon_2d_label = Polygon2DLabel(
                vertices=transformed_coordinates_2d,
                classes=polygon_2d_label.classes,
                attributes=polygon_2d_label.attributes,
            )
            labels[LABEL_MAP] = translated_polygon_2d_label

        return SequenceExample(instances=instances, labels=labels)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/lossy_crop.py
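# --- Illustrative sketch (not part of the repository; values are made up) ---
# The crop above reduces to slicing the spatial dimensions and translating the
# label vertices by the (left, top) offset; here is the same arithmetic in
# plain numpy.
import numpy as np

left, top, right, bottom = 10, 20, 110, 80
image = np.zeros((3, 100, 200), dtype=np.float32)  # C, H, W
cropped = image[:, top:bottom, left:right]  # shape (3, 60, 100)
vertices = np.array([[15.0, 25.0], [50.0, 60.0]])  # x, y pairs
translated = vertices - np.array([left, top], dtype=np.float32)
print(cropped.shape, translated.tolist())  # (3, 60, 100) [[5.0, 5.0], [40.0, 40.0]]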
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test for DriveNetLegacyMapper."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from parameterized import parameterized
import tensorflow as tf

import modulus
from nvidia_tao_tf1.blocks.multi_source_loader.processors.drivenet_legacy_mapper import (
    DriveNetLegacyMapper,
)


class TestDriveNetLegacyMapper(tf.test.TestCase):
    """Test for DriveNetLegacyMapper."""

    @parameterized.expand(
        [
            ("some class", "bottomWidth", "leftRight", "some_class", 3, 2),
            ("some other class", "bottom", "bottomRight", "some_other_class", 1, 3),
            ("no class", "width", "other bogus value", "no_class", 2, 0),
            ("school in july", "bogus value", "right", "school_in_july", 0, 2),
            ("heavy truck", "unknown", "bottomLeftRight", "heavy_truck", 0, 3),
            ("big SUV", "full", "bottom", "big_SUV", 0, 1),
            ("mini cooper", "Unknown", "bottomLeft", "mini_cooper", 0, 3),
            ("crossover", "?", "left", "crossover", 0, 2),
            ("luxury sedan", "not sure", "full", "luxury_sedan", 0, 0),
            ("sports car", "bottom", "unknown", "sports_car", 1, 0),
        ]
    )
    def test_process(
        self,
        object_class,
        occlusion,
        truncation_type,
        expected_object_class,
        expected_occlusion,
        expected_truncation_type,
    ):
        row = [
            u"cyclops-c",
            0,
            46835,
            0,
            0,
            u"cyclops-c",
            48,
            "",
            u"004f2c28-6bf8-5596-9216-963a141e7775",
            0,
            0,
            u"120FOV",
            u"none",
            u"video_1_front_center_120FOV_cyclops",
            1008,
            1920,
            0,
            [],
            0.26423147320747375,
            u"automobile",
            1,
            0,
            0,
            0,
            u"width",
            u"full",
            [[50.5, 688.83], [164.28, 766.8]],
            None,
            None,  # These 2 are the fields added by "add_fields".
            u"BOX",
        ]
        label_indices = {
            "BOX": {
                "is_cvip": 21,
                "occlusion": 24,
                "vertices": 26,
                "back": 18,
                "non_facing": 22,
                "front": 20,
                "attributes": 17,
                "classifier": 19,
                "num_vertices": 23,
                "truncation": 25,
                "mapped_occlusion": 27,
                "mapped_truncation": 28,
            },
            "dtype": 29,
        }
        row[label_indices["BOX"]["classifier"]] = object_class
        row[label_indices["BOX"]["truncation"]] = truncation_type
        row[label_indices["BOX"]["occlusion"]] = occlusion

        example = modulus.types.types.Example(instances=None, labels=label_indices)
        mapper = DriveNetLegacyMapper()
        keep = mapper.filter(example, "BOX", row)
        assert keep
        mapped_row = mapper.map(example, "BOX", row)

        assert mapped_row[label_indices["BOX"]["classifier"]] == expected_object_class
        assert (
            mapped_row[label_indices["BOX"]["mapped_truncation"]]
            == expected_truncation_type
        )
        assert (
            mapped_row[label_indices["BOX"]["mapped_occlusion"]]
            == expected_occlusion
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/drivenet_legacy_mapper_test.py
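# --- Illustrative sketch (not part of the repository; the mapping below is
# inferred from the expectations in the test above, not taken from the mapper's
# source) ---
# The legacy mapper appears to bucket free-form occlusion strings into small
# integer codes, with unrecognized strings falling back to a default.
OCCLUSION_CODES = {"full": 0, "bottom": 1, "width": 2, "bottomWidth": 3}


def map_occlusion(value, default=0):
    return OCCLUSION_CODES.get(value, default)


print(map_occlusion("bottomWidth"), map_occlusion("not sure"))  # 3 0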
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for applying random contrast augmentations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import (
    TransformProcessor,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import RandomContrast as _RandomContrast


class RandomContrast(TransformProcessor):
    """Augmentation processor that randomly perturbs the contrast of images."""

    @save_args
    def __init__(self, scale_max, center):
        """Construct a RandomContrast processor.

        Args:
            scale_max (float): The scale (or slope) of the contrast, as rotated
                around the provided center point. This value is half of the standard
                deviation, where values of twice the standard deviation are
                truncated. A value of 0 will not affect the matrix.
            center (float): The center around which the contrast is 'tilted', this
                is generally equal to the middle of the pixel value range. This
                value is typically 0.5 with a maximum pixel value of 1, or 127.5
                when the maximum value is 255.
        """
        super(RandomContrast, self).__init__(_RandomContrast(scale_max, center))
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_contrast.py
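# --- Illustrative sketch (not part of the repository) ---
# A contrast transform "tilted" around a center point, as described in the
# docstring above, maps each pixel as out = center + scale * (in - center).
# apply_contrast below is a hypothetical numpy version of that formula.
import numpy as np


def apply_contrast(pixels, scale, center=0.5):
    return center + scale * (np.asarray(pixels, dtype=float) - center)


print(apply_contrast([0.0, 0.5, 1.0], scale=2.0))  # [-0.5  0.5  1.5] before clipping.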
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Test for RiderRefineMapper."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pytest

import modulus
from nvidia_tao_tf1.blocks.multi_source_loader.processors.rider_refine_mapper import (
    RiderRefineMapper,
)


class TestRiderRefineMapper(object):
    """Test for RiderRefineMapper."""

    @pytest.mark.parametrize(
        "object_class, attributes, expected_object_class",
        [
            # Below are attribute types with label_name = "rider" in training and
            # test datasets.
            ("rider", ["other", "other_occlusion", "vehicle"], "vehicle_rider"),
            ("rider", ["other", "vehicle"], "vehicle_rider"),
            ("rider", ["other_occlusion", "vehicle"], "vehicle_rider"),
            ("rider", ["vehicle"], "vehicle_rider"),
            ("rider", ["bicycle", "other_occlusion"], "bicycle_rider"),
            ("rider", ["bicycle"], "bicycle_rider"),
            ("rider", ["other", "bicycle", "other_occlusion"], "bicycle_rider"),
            ("rider", ["other", "bicycle"], "bicycle_rider"),
            ("rider", ["motorcycle", "other"], "motorcycle_rider"),
            ("rider", ["motorcycle", "other_occlusion"], "motorcycle_rider"),
            ("rider", ["motorcycle", "other", "other_occlusion"], "motorcycle_rider"),
            ("rider", ["motorcycle"], "motorcycle_rider"),
            ("rider", ["bicycle", "vehicle"], "unknown_rider"),
            ("rider", ["motorcycle", "bicycle"], "unknown_rider"),
            ("rider", ["motorcycle", "vehicle"], "unknown_rider"),
            ("rider", ["other", "other_occlusion"], "unknown_rider"),
            ("rider", ["other"], "unknown_rider"),
            ("rider", ["other_occlusion"], "unknown_rider"),
            ("rider", [], "unknown_rider"),
            # Check the correctness of other source class.
            ("heavy truck", [], "heavy truck"),
        ],
    )
    def test_map(self, object_class, attributes, expected_object_class):
        """Test the correctness of map method in RiderRefineMapper."""
        dummy_row = [[], u"automobile", u"BOX"]
        dummy_label_indices = {"BOX": {"attributes": 0, "classifier": 1}, "dtype": 2}
        dummy_row[dummy_label_indices["BOX"]["classifier"]] = object_class
        dummy_row[dummy_label_indices["BOX"]["attributes"]] = attributes

        example = modulus.types.types.Example(
            instances=None, labels=dummy_label_indices
        )
        mapper = RiderRefineMapper()
        keep = mapper.filter(example, "BOX", dummy_row)
        assert keep
        mapped_row = mapper.map(example, "BOX", dummy_row)
        assert (
            mapped_row[dummy_label_indices["BOX"]["classifier"]]
            == expected_object_class
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/rider_refine_mapper_test.py
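# --- Illustrative sketch (not part of the repository; the rule below mirrors
# the expectations in the test above and is not the mapper's actual code) ---
# Refining a "rider" class by its vehicle-type attributes: exactly one vehicle
# attribute yields a specific rider class, anything else stays unknown.
VEHICLE_ATTRIBUTES = {"vehicle", "bicycle", "motorcycle"}


def refine_rider(attributes):
    found = VEHICLE_ATTRIBUTES.intersection(attributes)
    if len(found) == 1:
        return "{}_rider".format(found.pop())
    return "unknown_rider"


print(refine_rider(["other", "bicycle"]))  # bicycle_rider
print(refine_rider(["motorcycle", "vehicle"]))  # unknown_rider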
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""PolygonRasterizer benchmark suite."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from parameterized import parameterized
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.polygon_rasterizer import (
    PolygonRasterizer,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures import (
    make_polygon2d_label,
)
from modulus.utils import test_session


class PolygonRasterizerBenchmark(tf.test.Benchmark):
    """PolygonRasterizer benchmark suite."""

    ITERATIONS = 100

    def _benchmark_polygon_rasterizer(
        self,
        sess,
        example_count,
        frame_count,
        image_width,
        image_height,
        polygon_count,
        vertex_count,
        rasterizer_width,
        rasterizer_height,
    ):
        """
        Benchmark polygon_rasterizer with an example as input.

        Args:
            sess (tf.Session()): Session to run the benchmark.
            example_count (int): Number of examples in each batch.
            frame_count (int): Number of frames in each example.
            image_width (int): Image width to generate image.
            image_height (int): Image height to generate image.
            polygon_count (int): Number of polygons in each image.
            vertex_count (int): Number of vertices in each polygon.
            rasterizer_width (int): Width of output map.
            rasterizer_height (int): Height of output map.
        """
        shapes_per_frame = []
        for _ in range(example_count):
            shapes_per_frame.append([polygon_count for _ in range(frame_count)])
        labels2d = make_polygon2d_label(
            shapes_per_frame=shapes_per_frame,
            shape_classes=[4],
            shape_attributes=[4],
            height=image_height,
            width=image_width,
            coordinates_per_polygon=vertex_count,
        )
        processor = PolygonRasterizer(
            height=rasterizer_height,
            width=rasterizer_width,
            one_hot=True,
            binarize=True,
            nclasses=5,
        )
        rasterized = processor.process(labels2d)
        self.run_op_benchmark(
            sess=sess,
            op_or_tensor=rasterized.op,
            min_iters=self.ITERATIONS,
            store_trace=True,
            store_memory_usage=True,
        )

    @parameterized.expand(
        [
            [1, "/cpu:0"],
            [10, "/cpu:0"],
            [100, "/cpu:0"],
            [1000, "/cpu:0"],
            [1, "/gpu:0"],
            [10, "/gpu:0"],
            [100, "/gpu:0"],
            [1000, "/gpu:0"],
        ]
    )
    def benchmark_polygon_rasterizer_triangle_count(
        self, polygon_count, device_placement
    ):
        """Benchmark different numbers of triangles in each frame."""
        print("triangles_count {} device {}.".format(polygon_count, device_placement))
        with tf.device(device_placement), test_session(
            allow_soft_placement=True
        ) as sess:
            self._benchmark_polygon_rasterizer(
                sess=sess,
                example_count=32,
                frame_count=1,
                image_width=960,
                image_height=504,
                polygon_count=polygon_count,
                vertex_count=3,
                rasterizer_width=240,
                rasterizer_height=80,
            )

    @parameterized.expand(
        [
            [1, "/cpu:0"],
            [10, "/cpu:0"],
            [50, "/cpu:0"],
            [100, "/cpu:0"],
            [1, "/gpu:0"],
            [10, "/gpu:0"],
            [50, "/gpu:0"],
            [100, "/gpu:0"],
        ]
    )
    def benchmark_polygon_rasterizer_vertice_count(
        self, vertex_count, device_placement
    ):
        """Benchmark different numbers of vertices in each polygon."""
        print("vertex_count {} device {}.".format(vertex_count, device_placement))
        with tf.device(device_placement), test_session(
            allow_soft_placement=True
        ) as sess:
            self._benchmark_polygon_rasterizer(
                sess=sess,
                example_count=32,
                frame_count=1,
                image_width=960,
                image_height=504,
                polygon_count=5,
                vertex_count=vertex_count,
                rasterizer_width=240,
                rasterizer_height=80,
            )

    @parameterized.expand(
        [
            [1, "/cpu:0"],
            [4, "/cpu:0"],
            [8, "/cpu:0"],
            [16, "/cpu:0"],
            [32, "/cpu:0"],
            [64, "/cpu:0"],
            [1, "/gpu:0"],
            [4, "/gpu:0"],
            [8, "/gpu:0"],
            [16, "/gpu:0"],
            [32, "/gpu:0"],
            [64, "/gpu:0"],
        ]
    )
    def benchmark_polygon_rasterizer_example_count(
        self, example_count, device_placement
    ):
        """Benchmark different numbers of examples in each batch."""
        print("example_count {} device {}.".format(example_count, device_placement))
        with tf.device(device_placement), test_session(
            allow_soft_placement=True
        ) as sess:
            self._benchmark_polygon_rasterizer(
                sess=sess,
                example_count=example_count,
                frame_count=1,
                image_width=960,
                image_height=504,
                polygon_count=5,
                vertex_count=3,
                rasterizer_width=240,
                rasterizer_height=80,
            )

    @parameterized.expand(
        [
            [1, "/cpu:0"],
            [2, "/cpu:0"],
            [4, "/cpu:0"],
            [8, "/cpu:0"],
            [1, "/gpu:0"],
            [2, "/gpu:0"],
            [4, "/gpu:0"],
            [8, "/gpu:0"],
        ]
    )
    def benchmark_polygon_rasterizer_frame_count(self, frame_count, device_placement):
        """Benchmark different numbers of frames in each example."""
        print("frame_count {} device {}.".format(frame_count, device_placement))
        with tf.device(device_placement), test_session(
            allow_soft_placement=True
        ) as sess:
            self._benchmark_polygon_rasterizer(
                sess=sess,
                example_count=32,
                frame_count=frame_count,
                image_width=960,
                image_height=504,
                polygon_count=5,
                vertex_count=3,
                rasterizer_width=240,
                rasterizer_height=80,
            )

    @parameterized.expand(
        [
            [24, 8, "/cpu:0"],
            [240, 80, "/cpu:0"],
            [480, 160, "/cpu:0"],
            [960, 504, "/cpu:0"],
            [24, 8, "/gpu:0"],
            [240, 80, "/gpu:0"],
            [480, 160, "/gpu:0"],
            [960, 504, "/gpu:0"],
        ]
    )
    def benchmark_polygon_rasterizer_size(
        self, rasterizer_width, rasterizer_height, device_placement
    ):
        """Benchmark different rasterizer sizes."""
        print(
            "rasterizer size {} x {} device {}.".format(
                rasterizer_width, rasterizer_height, device_placement
            )
        )
        with tf.device(device_placement), test_session(
            allow_soft_placement=True
        ) as sess:
            self._benchmark_polygon_rasterizer(
                sess=sess,
                example_count=32,
                frame_count=1,
                image_width=960,
                image_height=504,
                polygon_count=5,
                vertex_count=3,
                rasterizer_width=rasterizer_width,
                rasterizer_height=rasterizer_height,
            )


if __name__ == "__main__":
    tf.test.main()
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/polygon_rasterizer_benchmark.py
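tf.test.Benchmark suites such as the one above are typically not collected by pytest; a common pattern is to execute the module directly and select benchmark methods with TensorFlow's --benchmarks regex flag. A hedged sketch of the invocation, assuming the standard TF1 benchmark flag handling:

# Typical invocation; the regex selects which benchmark methods run:
#
#   python polygon_rasterizer_benchmark.py \
#       --benchmarks='PolygonRasterizerBenchmark.benchmark_polygon_rasterizer_size.*'
#
# run_op_benchmark then reports per-iteration wall time, plus traces and
# memory statistics because store_trace and store_memory_usage are set above.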
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for applying random brightness augmentations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import (
    TransformProcessor,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import RandomBrightness as _RandomBrightness


class RandomBrightness(TransformProcessor):
    """Augmentation processor that randomly perturbs color brightness."""

    @save_args
    def __init__(self, scale_max, uniform_across_channels):
        """Construct a RandomBrightness processor.

        Args:
            scale_max (float): The range of the brightness offsets. This value is half
                of the standard deviation, where values of twice the standard deviation
                are truncated. A value of 0 (default) will not affect the matrix.
            uniform_across_channels (bool): If True, will apply the same brightness
                shift to all channels. If False, will apply a different brightness
                shift to each channel.
        """
        super(RandomBrightness, self).__init__(
            _RandomBrightness(scale_max, uniform_across_channels)
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_brightness.py
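A minimal usage sketch of the wrapper above; the scale_max value is illustrative:

from nvidia_tao_tf1.blocks.multi_source_loader.processors import RandomBrightness

# Shift brightness by the same random offset on every channel.
brightness = RandomBrightness(scale_max=0.2, uniform_across_channels=True)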
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base class for unittests that test processors implemented using TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Coordinates2D, Example, FEATURE_CAMERA, LABEL_MAP, Polygon2DLabel, PolygonLabel, ) import nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures as fixtures class ProcessorTestCase(tf.test.TestCase): """ Base class for unit tests testing Processors. Processors are tested using this tf.test.TestCase derived class because of the convenient test helpers provided by that class. This class adds commonly used methods to avoid repetition. """ def make_polygon_label(self, vertices, class_id=0): """Create a PolygonLabel. Args: vertices (list of `float`): Vertices that make up the polygon. class_id (int): Identifier for the class that the label represents. """ polygons = tf.constant(vertices, dtype=tf.float32) vertices_per_polygon = tf.constant([len(vertices)], dtype=tf.int32) class_ids_per_polygon = tf.constant([class_id], dtype=tf.int32) attributes_per_polygon = tf.constant([1], dtype=tf.int32) polygons_per_image = tf.constant([1], dtype=tf.int32) attributes = (tf.constant([], tf.int32),) attribute_count_per_polygon = tf.constant([], tf.int32) return PolygonLabel( polygons=polygons, vertices_per_polygon=vertices_per_polygon, class_ids_per_polygon=class_ids_per_polygon, attributes_per_polygon=attributes_per_polygon, polygons_per_image=polygons_per_image, attributes=attributes, attribute_count_per_polygon=attribute_count_per_polygon, ) def make_empty_polygon2d_labels(self, *args): """Create empty Polygon2DLabel. Args: *args (list of example dims): A set of example dims, e.g. (<batch>, <seq>). """ batch_size = args[0] if args else 0 list_args = list(args) empty_polygon2d = Polygon2DLabel( vertices=Coordinates2D( coordinates=tf.SparseTensor( indices=tf.reshape(tf.constant([], tf.int64), [0, len(args) + 3]), values=tf.constant([], tf.float32), dense_shape=tf.constant(list_args + [0, 0, 0], tf.int64), ), canvas_shape=fixtures.make_canvas2d(batch_size, 960, 560), ), classes=tf.SparseTensor( indices=tf.reshape(tf.constant([], tf.int64), [0, len(args) + 2]), values=tf.constant([], tf.int32), dense_shape=tf.constant(list_args + [0, 0], tf.int64), ), attributes=tf.SparseTensor( indices=tf.reshape(tf.constant([], tf.int64), [0, len(args) + 2]), values=tf.constant([], tf.int32), dense_shape=tf.constant(list_args + [0, 0], tf.int64), ), ) return empty_polygon2d def make_example_128x240(self): """Return Example with a triangular label.""" frames = tf.ones((1, 128, 240, 3)) labels = self.make_polygon_label([[120, 0.0], [240, 128], [0.0, 128]]) return Example(instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: labels}) def assert_labels_close(self, expected, actual): """Assert that labels match. 
        Args:
            expected (PolygonLabel): Expected label.
            actual (PolygonLabel): Actual label.
        """
        # Polygons are in pixel coordinates - milli-pixel tolerance seems acceptable
        self.assertAllClose(
            expected.polygons.eval(), actual.polygons.eval(), rtol=1e-3, atol=1e-3
        )
        self.assertAllEqual(
            expected.vertices_per_polygon.eval(), actual.vertices_per_polygon.eval()
        )
        self.assertAllEqual(
            expected.class_ids_per_polygon.eval(), actual.class_ids_per_polygon.eval()
        )
        self.assertAllEqual(
            expected.attributes_per_polygon.eval(), actual.attributes_per_polygon.eval()
        )
        self.assertEqual(
            expected.polygons_per_image.eval(), actual.polygons_per_image.eval()
        )

    def assertSparseEqual(self, expected, actual):
        """Assert that two sparse tensors match.

        Args:
            expected (tf.SparseTensor): Expected tensor.
            actual (tf.SparseTensor): Actual tensor.
        """
        self.assertAllEqual(expected.indices, actual.indices)
        self.assertAllEqual(expected.dense_shape, actual.dense_shape)
        self.assertAllClose(expected.values, actual.values)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/processor_test_case.py
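A minimal sketch of a test built on the helpers above; MyProcessor is a hypothetical processor under test:

from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import (
    ProcessorTestCase,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import LABEL_MAP


class MyProcessorTest(ProcessorTestCase):
    def test_keeps_triangle_label(self):
        example = self.make_example_128x240()
        processed = MyProcessor().process(example)  # MyProcessor is hypothetical.
        # assert_labels_close compares via .eval(), so run inside a session.
        with self.session():
            self.assert_labels_close(
                example.labels[LABEL_MAP], processed.labels[LABEL_MAP]
            )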
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processors for transforming and augmenting data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from nvidia_tao_tf1.blocks.multi_source_loader.processors.asset_loader import ( AssetLoader, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.bbox_clipper import ( BboxClipper, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.class_attribute_lookup_table import ( # noqa ClassAttributeLookupTable, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.class_attribute_mapper import ( ClassAttributeMapper, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.crop import Crop from nvidia_tao_tf1.blocks.multi_source_loader.processors.filter2d_processor import ( Filter2DProcessor, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.instance_mapper import ( InstanceMapper, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.label_adjustment import ( LabelAdjustment, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.lossy_crop import ( LossyCrop, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.multiple_polyline_to_polygon import ( # noqa MultiplePolylineToPolygon, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.pipeline import Pipeline from nvidia_tao_tf1.blocks.multi_source_loader.processors.polygon_rasterizer import ( PolygonRasterizer, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.polyline_clipper import ( PolylineClipper, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.polyline_to_polygon import ( PolylineToPolygon, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.priors_generator import ( PriorsGenerator, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import ( Processor, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_brightness import ( RandomBrightness, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_contrast import ( RandomContrast, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_flip import ( RandomFlip, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_gaussian_blur import ( RandomGaussianBlur, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_glimpse import ( RandomGlimpse, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_hue_saturation import ( RandomHueSaturation, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_rotation import ( RandomRotation, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_shear import ( RandomShear, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_translation import ( RandomTranslation, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_zoom import ( RandomZoom, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.rasterize_and_resize import ( RasterizeAndResize, ) from 
nvidia_tao_tf1.blocks.multi_source_loader.processors.scale import Scale from nvidia_tao_tf1.blocks.multi_source_loader.processors.sparse_to_dense_polyline import ( SparseToDensePolyline, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.temporal_batcher import ( TemporalBatcher, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import ( TransformProcessor, ) __all__ = ( "AssetLoader", "BboxClipper", "ClassAttributeLookupTable", "ClassAttributeMapper", "Crop", "InstanceMapper", "LabelAdjustment", "LossyCrop", "MultiplePolylineToPolygon", "Pipeline", "PolylineClipper", "PolygonRasterizer", "PolylineToPolygon", "PriorsGenerator", "Processor", "RandomBrightness", "RandomContrast", "RandomFlip", "RandomGlimpse", "RandomHueSaturation", "RandomRotation", "RandomTranslation", "RandomShear", "RandomZoom", "RasterizeAndResize", "Scale", "SparseToDensePolyline", "TransformProcessor", "RandomGaussianBlur", "TemporalBatcher", "Filter2DProcessor", )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/__init__.py
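Because the package re-exports everything listed in __all__, call sites can import processors from the package root instead of the individual modules; a small sketch:

from nvidia_tao_tf1.blocks.multi_source_loader.processors import (
    Crop,
    Pipeline,
    RandomBrightness,
)

# A Pipeline built from these can later fuse consecutive composable
# processors into a single transform (see Processor.compose).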
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for PolygonRasterizer processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from parameterized import parameterized from nvidia_tao_tf1.blocks.multi_source_loader.processors.polygon_rasterizer import ( PolygonRasterizer, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) import nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures as fixtures from nvidia_tao_tf1.core.coreobject import deserialize_tao_object class TestPolygonRasterizer(ProcessorTestCase): @parameterized.expand( [ [0, 1, "height: 0 is not a positive number."], [1, 0, "width: 0 is not a positive number."], ] ) def test_assertions_fail_for_invalid_arguments( self, height, width, expected_message ): with self.assertRaisesRegexp(ValueError, re.escape(expected_message)): PolygonRasterizer( height=height, width=width, one_hot=True, binarize=True, nclasses=1 ) def test_empty_polygon2d_labels(self): new_height = 252 new_width = 480 batch_size = 32 frame_size = 1 class_count = 2 expected_shape = ( batch_size, frame_size, class_count + 1, new_height, new_width, ) empty_polygon2d = self.make_empty_polygon2d_labels(batch_size, frame_size) processor = PolygonRasterizer( height=new_height, width=new_width, one_hot=True, binarize=True, nclasses=class_count, ) with self.session() as sess: rasterized = processor.process(empty_polygon2d) self.assertEqual(expected_shape, rasterized.shape) rasters = sess.run(rasterized) self.assertEqual(expected_shape, rasters.shape) @parameterized.expand( [ [[[1]]], [[[1, 1]]], [[[1], [2]]], [[[1, 2], [2, 3]]], [[[1, 2, 3], [4, 5, 6]]], ] ) def test_rasterizes_polygon2d_labels(self, shapes_per_frame): new_height = 252 new_width = 480 class_count = 2 example_count = len(shapes_per_frame) max_frame_count = max( [len(frames_per_example) for frames_per_example in shapes_per_frame] ) expected_shape = ( example_count, max_frame_count, class_count + 1, new_height, new_width, ) processor = PolygonRasterizer( height=new_height, width=new_width, one_hot=True, binarize=True, nclasses=class_count, ) with self.session() as sess: polygon = fixtures.make_polygon2d_label( shapes_per_frame=shapes_per_frame, shape_classes=[1], shape_attributes=[0], height=new_height, width=new_width, ) rasterized = processor.process(polygon) self.assertEqual(expected_shape, rasterized.shape) rasters = sess.run(rasterized) self.assertEqual(expected_shape, rasters.shape) @parameterized.expand( [ [[[1]]], [[[1, 1]]], [[[1], [2]]], [[[1, 2], [2, 3]]], [[[1, 2, 3], [4, 5, 6]]], ] ) def test_rasterizes_polygon2d_labels_not_one_hot(self, shapes_per_frame): new_height = 252 new_width = 480 class_count = 2 example_count = len(shapes_per_frame) max_frame_count = max( [len(frames_per_example) for frames_per_example in shapes_per_frame] ) expected_shape = (example_count, max_frame_count, 1, new_height, 
new_width) processor = PolygonRasterizer( height=new_height, width=new_width, one_hot=False, binarize=True, nclasses=class_count, ) with self.session() as sess: polygon = fixtures.make_polygon2d_label( shapes_per_frame=shapes_per_frame, shape_classes=[1], shape_attributes=[0], height=new_height, width=new_width, ) rasterized = processor.process(polygon) self.assertEqual(expected_shape, rasterized.shape) rasters = sess.run(rasterized) self.assertEqual(expected_shape, rasters.shape) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" processor = PolygonRasterizer( height=10, width=12, one_hot=False, binarize=True, nclasses=2 ) processor_dict = processor.serialize() deserialized_processor = deserialize_tao_object(processor_dict) self.assertEqual( processor._rasterize.height, deserialized_processor._rasterize.height ) self.assertEqual( processor._rasterize.width, deserialized_processor._rasterize.width ) self.assertEqual(processor.converters, deserialized_processor.converters)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/polygon_rasterizer_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Interface that pipeline processors must implement."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from abc import abstractmethod, abstractproperty

from nvidia_tao_tf1.core.coreobject import AbstractTAOObject


class Processor(AbstractTAOObject):
    """Interface that pipeline processors must implement."""

    def __init__(self):
        """Construct a processor."""
        super(Processor, self).__init__()
        self._data_format = None

    def __str__(self):
        """Returns a string representation of this processor."""
        return type(self).__name__

    @abstractproperty
    def supported_formats(self):
        """Data formats supported by this processor.

        Returns:
            data_formats (list of 'DataFormat'): Input data formats that this processor
                supports.
        """

    @property
    def data_format(self):
        """Data format of the image/frame tensors to process."""
        return self._data_format

    @data_format.setter
    def data_format(self, value):
        self._data_format = value

    @abstractmethod
    def can_compose(self, other):
        """
        Determine whether two processors can be composed into a single one.

        Args:
            other (Processor): Other processor instance.

        Returns:
            (Boolean): True if this processor knows how to compose the other processor.
        """
        raise NotImplementedError("Processors.can_compose not implemented.")

    @abstractmethod
    def compose(self, other):
        """Compose two processors into a single one.

        Example:
            Given a list of processors:

                processors = [Processor(can_compose=False),
                              Processor(can_compose=True),
                              Processor(can_compose=True)]

            The two consecutive processors at the end of the list can be combined into
            a single processor to reduce compute:

                composed = [Processor(can_compose=False), Processor(can_compose=True)]

        The main use case for this interface is processors based on spatial and color
        transform matrices. The caller (e.g. Pipeline) can traverse a list of
        processors and compose consecutive composable processors into a single
        processor by calling their compose method.

        Args:
            other (Processor): Other processor instance.

        Returns:
            processor (Processor): A new processor that performs the same processing
                as the individual processors applied in sequence.
        """
        raise NotImplementedError("ComposableProcessor.compose not implemented")

    @abstractmethod
    def process(self, example):
        """
        Process an example.

        Args:
            example (Example): Example with frames in format specified by data_format.

        Returns:
            (Example): Processed example.
        """
        raise NotImplementedError("Processors.process not implemented.")
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/processor.py
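A minimal concrete sketch of the interface above; IdentityProcessor is hypothetical and performs no work, but shows which members a subclass must supply:

from nvidia_tao_tf1.blocks.multi_source_loader.processors import Processor


class IdentityProcessor(Processor):
    """Processor that returns examples unchanged."""

    @property
    def supported_formats(self):
        # Declare the frame layouts this processor accepts (none needed here).
        return []

    def can_compose(self, other):
        # This processor never composes with its neighbors.
        return False

    def compose(self, other):
        raise ValueError("IdentityProcessor does not compose.")

    def process(self, example):
        # Pass the example through untouched.
        return example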
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility for building Gaussian kernels used as blurring filters."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf


def gaussian_kernel(size, mean=0, stddev=None):
    """Return a Gaussian kernel as a Gaussian blurring filter.

    Args:
        size (int): The filter size.
        mean (float): Mean of the normal distribution.
        stddev (float): Std of the normal distribution.

    Return:
        (tensor): A float tensor of shape [size, 1].
    """
    # If stddev is not given, infer it from the filter size:
    # sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8.
    # https://docs.opencv.org/2.4/modules/imgproc/doc/
    # filtering.html?highlight=gaussianblur#Mat%20
    # getGaussianKernel(int%20ksize,%20double%20sigma,%20int%20ktype)
    if stddev is None:
        stddev = 0.3 * ((size - 1) * 0.5 - 1) + 0.8
    normal_distribution = tf.compat.v1.distributions.Normal(mean, stddev)
    # When size = 5,
    # vals = g(-2), g(-1), g(0), g(1), g(2), where
    # g(x) = 1/(sqrt(2*pi)*sigma) * exp(-x^2/(2*sigma^2)).
    # array([0.05399096, 0.24197073, 0.3989423, 0.24197073, 0.05399096]).
    range_x = tf.range(start=0, limit=size, dtype=tf.int32) - (size - 1) // 2
    vals = normal_distribution.prob(tf.cast(range_x, tf.float32))
    kernel = tf.reshape(vals, [size, 1])
    return kernel / tf.reduce_sum(input_tensor=kernel)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/gaussian_kernel.py
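A quick worked example of the helper above, assuming the module path shown; with stddev fixed to 1.0 the taps are the standard-normal pdf values quoted in the source comment, rescaled to sum to 1:

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.gaussian_kernel import (
    gaussian_kernel,
)

kernel = gaussian_kernel(size=5, mean=0.0, stddev=1.0)
with tf.compat.v1.Session() as sess:
    print(sess.run(kernel).ravel())
# -> approximately [0.0545, 0.2442, 0.4026, 0.2442, 0.0545]: the raw pdf
#    values 0.05399, 0.24197, 0.39894, ... normalized to sum to 1.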
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor that loads assets referenced in Examples."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.core import processors


class AssetLoader(processors.Processor):
    """Processor that loads assets referenced in Examples."""

    def __init__(self, output_dtype=tf.float32, **kwargs):
        """Construct a processor that loads referenced assets.

        Args:
            output_dtype (tf.dtypes.DType): Output image dtype. Defaults to tf.float32.
        """
        super(AssetLoader, self).__init__(**kwargs)
        self._output_dtype = output_dtype

    def call(self, example):
        """Load all referenced assets recursively.

        Args:
            example (SequenceExample): Example composed of geometric primitives. All
                primitives with a `load` method will be replaced by the return value
                of the load method.

        Returns:
            (SequenceExample): Example with all references to assets replaced with
                the actual assets.
        """
        return self._load_features(example)

    def _load_features(self, example):
        # TODO(vkallioniemi): This functionality is mostly the same as the recursion
        # code in TransformedExample - extract into a separate method.
        def _load_recursive(value):
            def _is_namedtuple(value):
                """Return true if value is a namedtuple."""
                return isinstance(value, tuple) and hasattr(value, "_fields")

            # Call load only if implemented on a namedtuple.
            load_op = getattr(value, "load", None)
            if _is_namedtuple(value) and (load_op is not None and callable(load_op)):
                return load_op(self._output_dtype)
            if isinstance(value, (list, set)):
                return [_load_recursive(v) for v in value]
            if isinstance(value, dict):
                return {k: _load_recursive(v) for (k, v) in value.items()}
            if _is_namedtuple(value):
                return value._make(
                    [_load_recursive(field) for field in value._asdict().values()]
                )
            # Stop recursion - unknown non-collection types are treated as leaf nodes.
            return value

        return _load_recursive(example)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/asset_loader.py
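A minimal sketch of the load contract described above: any namedtuple field with a callable `load` attribute is replaced by `load(output_dtype)`. Jpeg and _decode here are hypothetical stand-ins for a real lazy asset:

import collections

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors import AssetLoader

Jpeg = collections.namedtuple("Jpeg", ["load"])


def _decode(output_dtype):
    # A real asset would decode bytes here; return a placeholder tensor.
    return tf.zeros([3, 4, 4], dtype=output_dtype)


example = {"frames": Jpeg(load=_decode)}
loaded = AssetLoader()(example)  # {"frames": <float32 tensor of shape [3, 4, 4]>}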
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Class for mapping input classes and attributes to output classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

import numpy as np
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors import sparse_generators
from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel
from nvidia_tao_tf1.core.coreobject import TAOObject, save_args

logger = logging.getLogger(__name__)


class ClassAttributeMapper(TAOObject):
    """ClassAttributeMapper maps input classes and attributes to output classes."""

    @save_args
    def __init__(
        self,
        output_class_mappings,
        default_class_name,
        default_class_id,
        attribute_mappings,
        default_attribute_id,
        **kwargs
    ):
        """Construct a ClassAttributeMapper.

        Args:
            output_class_mappings (ordered collection of output class maps): The
                output class mappings to be applied.
            default_class_name (string): The default class name. To be applied if
                none of the output_class_mappings match.
            default_class_id (int): The default class id. To be applied if none of
                the output_class_mappings match.
            attribute_mappings (dict of attribute name -> id mappings): The attribute
                name to id mappings to be applied.
            default_attribute_id (int): The default attribute id. To be applied if an
                attribute is not in attribute_mappings.
        """
        self._attribute_mappings = {
            k.strip().lower(): v for k, v in attribute_mappings.items()
        }
        self._default_attribute_id = default_attribute_id
        self._default_class_id = default_class_id
        self._default_class_name = default_class_name
        self._class_matchers = [_Matcher(**oc) for oc in output_class_mappings]
        super(ClassAttributeMapper, self).__init__(**kwargs)

    def __call__(self, polygon_2d_label):
        """Map text class and attribute names to numeric ids.

        Args:
            polygon_2d_label (Polygon2DLabel): A label containing 2D
                polygons/polylines and their associated classes and attributes. The
                first two dimensions of each tensor that this structure contains
                should be batch/example(B) followed by a frame/time dimension(T). The
                rest of the dimensions encode type specific information. See
                Polygon2DLabel documentation for details.

        Returns:
            (Polygon2DLabel): The label with the classes and attributes mapped to
                numeric ids.
        """
        # Classes are stored in a 4D tensor of shape [B, T, S, C], where
        # B=Batch (example within batch), T=Time step, S=Shape, C=Class (always 1).
        classes = polygon_2d_label.classes

        # Attributes are stored in a 4D tensor of shape [B, T, S, A], where
        # B=Batch (example within batch), T=Time step, S=Shape, A=Attribute
        # (0 or more attributes per shape).
        attributes = polygon_2d_label.attributes

        # Want to match all indices other than the class/attribute index.
index_prefix_size = 3 def _mapper( class_values, class_indices, class_shape, attribute_values, attributes_indices, attributes_shape, ): class_ids = [] class_ids_indices = [] attribute_ids = [] attribute_ids_indices = [] for ( sub_index, class_names, _, attribute_names, _, ) in sparse_generators.matching_indices_generator( index_prefix_size, class_values, class_indices, attribute_values, attributes_indices, ): matched_class_id = None matched_attributes = None for matcher in self._class_matchers: matched_class_id, matched_attributes = matcher.match( class_names, attribute_names ) if matched_class_id is not None: break class_ids.append( matched_class_id if matched_class_id is not None else self._default_class_id ) class_ids_indices.append(sub_index + [0]) attribute_index_counter = 0 # Sets don't iterate deterministically in Python 3, convert to a sorted list so this # function gives back consistent return values (useful for testing). matched_attributes = sorted(list(matched_attributes)) for attribute in matched_attributes: attribute_ids.append( self._attribute_mappings.get( attribute, self._default_attribute_id ) ) attribute_ids_indices.append(sub_index + [attribute_index_counter]) attribute_index_counter += 1 class_ids = np.array(class_ids, dtype=np.int32) class_ids_indices = np.array(class_ids_indices, dtype=np.int64) attribute_ids = np.array(attribute_ids, dtype=np.int32) attribute_ids_indices = np.array(attribute_ids_indices, dtype=np.int64) return ( class_ids, class_ids_indices, class_shape, attribute_ids, attribute_ids_indices, attributes_shape, ) ( mapped_class_ids, mapped_class_indices, mapped_class_shape, mapped_attribute_ids, mapped_attribute_indices, mapped_attribute_shape, ) = tf.compat.v1.py_func( _mapper, [ classes.values, classes.indices, classes.dense_shape, attributes.values, attributes.indices, attributes.dense_shape, ], [tf.int32, tf.int64, tf.int64, tf.int32, tf.int64, tf.int64], stateful=False, ) return Polygon2DLabel( vertices=polygon_2d_label.vertices, classes=tf.SparseTensor( values=mapped_class_ids, indices=mapped_class_indices, dense_shape=mapped_class_shape, ), attributes=tf.SparseTensor( values=mapped_attribute_ids, indices=mapped_attribute_indices, dense_shape=mapped_attribute_shape, ), ) def _normalized_set(strings): if strings is None: return set() return {_decode(s.strip().lower()) for s in strings} def _decode(obj): """Decodes byte strings into unicode strings in Python 3.""" if isinstance(obj, str): return obj return obj.decode() class _Matcher(object): """Matcher class which matches against a single class match specification.""" def __init__( self, class_id, class_name, match_any_class, match_any_attribute=None, match_all_attributes=None, match_all_attributes_allow_others=False, remove_matched_attributes=False, ): self._any_class = _normalized_set(match_any_class) self._any_attributes = _normalized_set(match_any_attribute) self._all_attributes = _normalized_set(match_all_attributes) self._all_attributes_allow_others = match_all_attributes_allow_others self._class_id = class_id self._class_name = class_name self._remove_matched_attributes = remove_matched_attributes def match(self, class_names, attribute_names): class_names = _normalized_set(class_names) attribute_names = _normalized_set(attribute_names) # Possible match in case class_names intersects with any class or if both are empty, # otherwise return no match here if not (class_names & self._any_class) and (class_names or self._any_class): return None, attribute_names if self._any_attributes and not 
(attribute_names & self._any_attributes): return None, attribute_names if self._all_attributes: if self._all_attributes_allow_others: if len(attribute_names & self._all_attributes) != len( self._all_attributes ): return None, attribute_names else: if not self._all_attributes == attribute_names: return None, attribute_names if self._remove_matched_attributes: attribute_names = attribute_names.difference(self._any_attributes) attribute_names = attribute_names.difference(self._all_attributes) return self._class_id, attribute_names
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/class_attribute_mapper.py
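A minimal configuration sketch; class names, attribute names, and ids are illustrative. Each dict in output_class_mappings is expanded into _Matcher keyword arguments, so the keys mirror _Matcher's constructor:

from nvidia_tao_tf1.blocks.multi_source_loader.processors import ClassAttributeMapper

mapper = ClassAttributeMapper(
    output_class_mappings=[
        {
            "class_id": 0,
            "class_name": "bicycle_rider",
            "match_any_class": ["rider"],
            "match_any_attribute": ["bicycle"],
        },
        {
            "class_id": 1,
            "class_name": "vehicle_rider",
            "match_any_class": ["rider"],
            "match_any_attribute": ["vehicle"],
        },
    ],
    default_class_name="unknown",
    default_class_id=-1,
    attribute_mappings={"occluded": 0},
    default_attribute_id=-1,
)
# mapper(polygon_2d_label) then returns a Polygon2DLabel whose classes and
# attributes sparse tensors hold numeric ids.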
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for applying random rotation augmentations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import ( TransformProcessor, ) from nvidia_tao_tf1.core.coreobject import save_args from nvidia_tao_tf1.core.processors import RandomRotation as _RandomRotation class RandomRotation(TransformProcessor): """Augmentation processor that randomly rotates images and labels.""" @save_args def __init__(self, min_angle, max_angle, probability=1.0): """Construct a RandomRotation processor. Args: min_angle (float): Minimum angle in degrees. max_angle (float): Maximum angle in degrees. probability (float): Probability at which rotation is performed. """ super(RandomRotation, self).__init__( _RandomRotation( min_angle=min_angle, max_angle=max_angle, probability=probability ) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_rotation.py
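A minimal usage sketch of the processor above; the angle range and probability are illustrative:

from nvidia_tao_tf1.blocks.multi_source_loader.processors import RandomRotation

# Rotate by a randomly sampled angle in [-10, 10] degrees on half the examples.
rotation = RandomRotation(min_angle=-10.0, max_angle=10.0, probability=0.5)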
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for RandomGaussianBlur processor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import pytest
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import (
    ProcessorTestCase,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_gaussian_blur import (
    RandomGaussianBlur,
)
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object


class TestRandomGaussianBlur(ProcessorTestCase):
    def test_forwards_init_value_errors_to_caller(self):
        with pytest.raises(ValueError):
            RandomGaussianBlur(
                min_filter_size=5, max_filter_size=2, max_stddev=1.0, probability=2.0
            )

    def test_get_filters(self):
        random_gaussian_blur = RandomGaussianBlur(
            min_filter_size=5, max_filter_size=5, max_stddev=1.0, probability=1.0
        )
        with tf.compat.v1.Session() as sess:
            np_gaussian_filter_list = sess.run(random_gaussian_blur.get_filters())
            filter_list_length = len(np_gaussian_filter_list)
            filter_sum_0 = np.sum(np_gaussian_filter_list[0])
            filter_sum_1 = np.sum(np_gaussian_filter_list[1])
            filter_0_shape = np_gaussian_filter_list[0].shape
            filter_1_shape = np_gaussian_filter_list[1].shape
            np.testing.assert_equal(filter_list_length, 2)
            # Each separable half of the filter must be normalized to sum to 1.
            np.testing.assert_allclose(filter_sum_0, 1.0, atol=1e-6)
            np.testing.assert_allclose(filter_sum_1, 1.0, atol=1e-6)
            np.testing.assert_equal(filter_0_shape, (5, 1))
            np.testing.assert_equal(filter_1_shape, (1, 5))

    def test_serialization_and_deserialization(self):
        """Test that it is a TAOObject that can be serialized and deserialized."""
        random_gaussian_blur = RandomGaussianBlur(
            min_filter_size=5, max_filter_size=5, max_stddev=1.0, probability=1.0
        )
        random_gaussian_blur_dict = random_gaussian_blur.serialize()
        deserialized_dict = deserialize_tao_object(random_gaussian_blur_dict)
        assert (
            random_gaussian_blur._min_filter_size
            == deserialized_dict._min_filter_size
        )
        assert (
            random_gaussian_blur._max_filter_size
            == deserialized_dict._max_filter_size
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_gaussian_blur_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomShear processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from mock import Mock, patch import pytest from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_shear import ( RandomShear, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object class TestRandomShear(ProcessorTestCase): @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.random_shear._RandomShear" ) def test_forwards_constructor_arguments_to_transform(self, mocked_shear_transform): max_ratio_x = Mock() max_ratio_y = Mock() probability = Mock() RandomShear( max_ratio_x=max_ratio_x, max_ratio_y=max_ratio_y, probability=probability ) mocked_shear_transform.assert_called_with( max_ratio_x=max_ratio_x, max_ratio_y=max_ratio_y, probability=probability ) @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.random_shear._RandomShear" ) def test_forwards_transform_value_errors_to_caller(self, mocked_shear_transform): def raise_value_error(**kwargs): raise ValueError("test error") mocked_shear_transform.side_effect = raise_value_error with pytest.raises(ValueError) as exc: RandomShear(max_ratio_x=-1, max_ratio_y=-5, probability=1.0) assert "test error" in str(exc) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomShear(max_ratio_x=1, max_ratio_y=5, probability=1.0) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_shear_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for cropping images and labels."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import (
    TransformProcessor,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import Crop as _Crop


class Crop(TransformProcessor):
    """Crop processor."""

    @save_args
    def __init__(self, left, top, right, bottom):
        """Create a processor for cropping frames and labels.

        The origin of the coordinate system is at the top-left corner. Coordinates
        keep increasing from left to right and from top to bottom.

                     top
                  --------
            left |        |
                 |        | right
                  --------
                   bottom

        Args:
            left (int): Left edge before which contents will be discarded.
            top (int): Top edge above which contents will be discarded.
            right (int): Right edge after which contents will be discarded.
            bottom (int): Bottom edge after which contents will be discarded.
        """
        super(Crop, self).__init__(
            _Crop(left=left, top=top, right=right, bottom=bottom)
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/crop.py
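A minimal usage sketch of the edge semantics documented above; the pixel values are illustrative:

from nvidia_tao_tf1.blocks.multi_source_loader.processors import Crop

# Keep the region between the vertical edges x=10 and x=250 and the
# horizontal edges y=20 and y=148; everything outside is discarded.
crop = Crop(left=10, top=20, right=250, bottom=148)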
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generator function for iterating over sparse tensors with corresponding indices.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np def sparse_generator(index_prefix_size, values_array, indices_array): """Function which returns a generator for iterating through elements of a sparse tensor. Iterates through elements with matching sub indices. Args: index_prefix_size (int): The number of indices, starting with the 0th to include in the sub indices. values_array(np.array): Values array for the sparse tensor. indices_array(np.array): Indices array for the sparse tensor. Returns: Generator which returns the current sub-index, indices and corresponding values for that sub index. Example: Iterate through the vertices for each polygon in a 4D tensor of polygons with the following indices B, S, V, C where B=Batch, S=Shape, V=Vertex and C=Coordinate indices_array = [[2, 0, 0, 0], [2, 0, 0, 1], [2, 0, 1, 0], [2, 0, 1, 1], [2, 0, 2, 0], [2, 0, 2, 1], [2, 1, 0, 0], [2, 1, 0, 1], [2, 1, 1, 0], [2, 1, 1, 1], [2, 1, 2, 0], [2, 1, 2, 1], [3, 1, 0, 0], [3, 1, 0, 1], [3, 1, 1, 0], [3, 1, 1, 1], [3, 1, 2, 0], [3, 1, 2, 1]]) values_array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] generator = sparse_generator(2, values_array, indices_array) sub_index, values, indices = next(generator) # sub_index [2, 0] # values [0, 1, 2, 3, 4, 5] # indices [[2, 0, 0, 0], [2, 0, 0, 1], # [2, 0, 1, 0], [2, 0, 1, 1], # [2, 0, 2, 0], [2, 0, 2, 1]] # sub_index, values, indices = next(generator) # sub index [2, 1] # values [6, 7, 8, 9, 10, 11] # indices [[2, 1, 0, 0], [2, 1, 0, 1], # [2, 1, 1, 0], [2, 1, 1, 1], # [2, 1, 2, 0], [2, 1, 2, 1]] # sub_index, values, indices = next(generator) # sub index [3, 1] # values [12, 13, 14, 15, 16, 17] # indices [[3, 1, 0, 0], [3, 1, 0, 1], # [3, 1, 1, 0], [3, 1, 1, 1], # [3, 1, 2, 0], [3, 1, 2, 1]] """ index = 0 while index < values_array.size: curr_sub_index = indices_array[index][0:index_prefix_size] values = [] indices = [] while index < values_array.size: row_sub_index = indices_array[index][0:index_prefix_size] if np.array_equal(curr_sub_index, row_sub_index): values.append(values_array[index]) indices.append(list(indices_array[index])) index += 1 else: break yield list(curr_sub_index), values, indices def matching_indices_generator( index_prefix_size, sparse0_values, sparse0_indices, sparse1_values, sparse1_indices ): """Function which returns a generator for iterating through elements in two sparse tensors. Iterates through elements with common subindices across two tensors. Args: index_prefix_size (int): The number of indices, starting with the 0th, to include in the sub indices. sparse0_values(np.array): Values array for the first sparse tensor. sparse0_indices(np.array): Indices array for the first sparse tensor. sparse1_values(np.array): Values array for the second sparse tensor. 
sparse1_indices(np.array): Indices array for the second sparse tensor. Returns: Generator which returns the current sub-index, indices and corresponding values for that sub index for two sparse tensor. Example usage: Iterate through a sparse tensor of polygon vertices and get corresponding class ids for the same polygons. for sub_index, polygon_vertices, polygon_indices, class_values, class_indices in matching_indices_generator(3, polygon_vertices_values, polygon_vertices_indices, class_values, class_indices): # sub_index: holds the common sub-index shared by the polygon vertices and class values # polygon_vertices: contains the vertices for a polygon # polygon_indices: contains the indices for those vertices # class_values: contains the corresponding class(es) for the polygon # class_indices: contains the indices for those classes. """ end = (None, None, None) sparse0_iterator = sparse_generator( index_prefix_size, sparse0_values, sparse0_indices ) sparse1_iterator = sparse_generator( index_prefix_size, sparse1_values, sparse1_indices ) sparse0_index, sparse0_values, sparse0_indices = next(sparse0_iterator, end) sparse1_index, sparse1_values, sparse1_indices = next(sparse1_iterator, end) def _index_less_than(index_0, index_1): for i in range(len(index_0)): if index_0[i] == index_1[i]: continue else: return index_0[i] < index_1[i] return False while sparse0_index is not None or sparse1_index is not None: if sparse0_index is None: yield sparse1_index, [], [], sparse1_values, sparse1_indices sparse1_index, sparse1_values, sparse1_indices = next(sparse1_iterator, end) elif sparse1_index is None: yield sparse0_index, sparse0_values, sparse0_indices, [], [] sparse0_index, sparse0_values, sparse0_indices = next(sparse0_iterator, end) elif np.array_equal(sparse0_index, sparse1_index): yield sparse0_index, sparse0_values, sparse0_indices, sparse1_values, sparse1_indices sparse0_index, sparse0_values, sparse0_indices = next(sparse0_iterator, end) sparse1_index, sparse1_values, sparse1_indices = next(sparse1_iterator, end) elif _index_less_than(sparse0_index, sparse1_index): yield sparse0_index, sparse0_values, sparse0_indices, [], [] sparse0_index, sparse0_values, sparse0_indices = next(sparse0_iterator, end) else: yield sparse1_index, [], [], sparse1_values, sparse1_indices sparse1_index, sparse1_values, sparse1_indices = next(sparse1_iterator, end)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/sparse_generators.py
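A runnable condensation of the sparse_generator docstring example above, grouping sparse-tensor entries by their first two index components:

import numpy as np

from nvidia_tao_tf1.blocks.multi_source_loader.processors.sparse_generators import (
    sparse_generator,
)

indices = np.array([[2, 0, 0, 0], [2, 0, 0, 1], [2, 1, 0, 0]])
values = np.array([0, 1, 2])

for sub_index, vals, idxs in sparse_generator(2, values, indices):
    print(sub_index, vals)
# [2, 0] [0, 1]
# [2, 1] [2]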
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test for sparse tensor generator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from nvidia_tao_tf1.blocks.multi_source_loader.processors import sparse_generators tensor0_indices = np.array( [ [2, 0, 0, 0], [2, 0, 0, 1], [2, 0, 1, 0], [2, 0, 1, 1], [2, 0, 2, 0], [2, 0, 2, 1], [2, 1, 0, 0], [2, 1, 0, 1], [2, 1, 1, 0], [2, 1, 1, 1], [2, 1, 2, 0], [2, 1, 2, 1], [3, 1, 0, 0], [3, 1, 0, 1], [3, 1, 1, 0], [3, 1, 1, 1], [3, 1, 2, 0], [3, 1, 2, 1], ] ) tensor0_values = np.array( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] ) tensor1_indices = np.array( [[0, 0, 0], [1, 0, 0], [1, 1, 0], [2, 0, 0], [2, 1, 0], [3, 0, 0], [3, 1, 0]] ) tensor1_values = np.array([0, 1, 2, 3, 4, 5, 6]) def test_sparse_iterator(): accumulated_sub_indices = [] accumulated_values = [] accumulated_indices = [] expected_values = [ [0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], ] expected_indices = [ [ [2, 0, 0, 0], [2, 0, 0, 1], [2, 0, 1, 0], [2, 0, 1, 1], [2, 0, 2, 0], [2, 0, 2, 1], ], [ [2, 1, 0, 0], [2, 1, 0, 1], [2, 1, 1, 0], [2, 1, 1, 1], [2, 1, 2, 0], [2, 1, 2, 1], ], [ [3, 1, 0, 0], [3, 1, 0, 1], [3, 1, 1, 0], [3, 1, 1, 1], [3, 1, 2, 0], [3, 1, 2, 1], ], ] expected_sub_indices = [[2, 0], [2, 1], [3, 1]] for sub_index, values, indices in sparse_generators.sparse_generator( 2, tensor0_values, tensor0_indices ): accumulated_sub_indices.append(sub_index) accumulated_values.append(values) accumulated_indices.append(indices) assert accumulated_sub_indices == expected_sub_indices assert accumulated_values == expected_values assert accumulated_indices == expected_indices def test_matching_indices(): t0_values_array = [] t1_values_array = [] t0_indices_array = [] t1_indices_array = [] expected_t0_values = [ [], [], [], [0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [], [12, 13, 14, 15, 16, 17], ] expected_t1_values = [[0], [1], [2], [3], [4], [5], [6]] expected_t0_indices = [ [], [], [], [ [2, 0, 0, 0], [2, 0, 0, 1], [2, 0, 1, 0], [2, 0, 1, 1], [2, 0, 2, 0], [2, 0, 2, 1], ], [ [2, 1, 0, 0], [2, 1, 0, 1], [2, 1, 1, 0], [2, 1, 1, 1], [2, 1, 2, 0], [2, 1, 2, 1], ], [], [ [3, 1, 0, 0], [3, 1, 0, 1], [3, 1, 1, 0], [3, 1, 1, 1], [3, 1, 2, 0], [3, 1, 2, 1], ], ] expected_t1_indices = [ [[0, 0, 0]], [[1, 0, 0]], [[1, 1, 0]], [[2, 0, 0]], [[2, 1, 0]], [[3, 0, 0]], [[3, 1, 0]], ] for ( _, t0_values, t0_indices, t1_values, t1_indices, ) in sparse_generators.matching_indices_generator( 2, tensor0_values, tensor0_indices, tensor1_values, tensor1_indices ): t0_values_array.append(t0_values) t1_values_array.append(t1_values) t0_indices_array.append(t0_indices) t1_indices_array.append(t1_indices) assert t0_values_array == expected_t0_values assert t1_values_array == expected_t1_values assert t0_indices_array == expected_t0_indices assert t1_indices_array == expected_t1_indices def test_non_matching_indices(): tensor0 = [ ([0, 0, 
1, 0], "Car"), ([0, 0, 7, 0], "Boat"), ([0, 0, 7, 1], "RV"), ([0, 0, 9, 0], "Car"), ([0, 0, 10, 0], "Car"), ([0, 0, 16, 0], "Car"), ([0, 0, 17, 0], "Car"), ([1, 0, 0, 0], "Car"), ([1, 0, 1, 0], "Car"), ([1, 0, 34, 0], "Car"), ] tensor0_indices = np.array([t[0] for t in tensor0]) tensor0_values = np.array([t[1] for t in tensor0]) tensor1 = [ ([0, 0, 0, 0], "object_5"), ([0, 0, 1, 0], "object_2"), ([0, 0, 9, 0], "object_6"), ([0, 0, 10, 0], "object_4"), ([1, 0, 34, 0], "object_2"), ([1, 0, 35, 0], "object_2"), ([1, 0, 35, 1], "object_3"), ] tensor1_indices = np.array([t[0] for t in tensor1]) tensor1_values = np.array([t[1] for t in tensor1]) expected_accumulation = [ # Only the second tensor has values for first sub index. ([0, 0, 0], [], [], [[0, 0, 0, 0]], ["object_5"]), # Both tensors have one value for this sub index ([0, 0, 1], [[0, 0, 1, 0]], ["Car"], [[0, 0, 1, 0]], ["object_2"]), # First tensor has two values, second tensor no values for this sub index ([0, 0, 7], [[0, 0, 7, 0], [0, 0, 7, 1]], ["Boat", "RV"], [], []), ([0, 0, 9], [[0, 0, 9, 0]], ["Car"], [[0, 0, 9, 0]], ["object_6"]), ([0, 0, 10], [[0, 0, 10, 0]], ["Car"], [[0, 0, 10, 0]], ["object_4"]), # First tensor has one value, second tensor has no values for this sub index ([0, 0, 16], [[0, 0, 16, 0]], ["Car"], [], []), ([0, 0, 17], [[0, 0, 17, 0]], ["Car"], [], []), ([1, 0, 0], [[1, 0, 0, 0]], ["Car"], [], []), ([1, 0, 1], [[1, 0, 1, 0]], ["Car"], [], []), ([1, 0, 34], [[1, 0, 34, 0]], ["Car"], [[1, 0, 34, 0]], ["object_2"]), # First tensor has no values, second tensor has two values for this sub-index ([1, 0, 35], [], [], [[1, 0, 35, 0], [1, 0, 35, 1]], ["object_2", "object_3"]), ] accumulation = [] for ( sub_index, t0_values, t0_indices, t1_values, t1_indices, ) in sparse_generators.matching_indices_generator( 3, tensor0_values, tensor0_indices, tensor1_values, tensor1_indices ): accumulation.append((sub_index, t0_indices, t0_values, t1_indices, t1_values)) assert accumulation == expected_accumulation
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/sparse_generators_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for AssetLoader.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from mock import patch import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.asset_loader import ( AssetLoader, ) from nvidia_tao_tf1.blocks.multi_source_loader.types.sequence_example import ( SequenceExample, ) TestInstance = collections.namedtuple("TestInstance", ["load"]) TestLabel = collections.namedtuple("TestLabel", ["load"]) class AssetLoaderTest(tf.test.TestCase): def test_load_recurses_instances(self): with patch.object(TestInstance, "load") as mocked_load: example = SequenceExample( instances={"test": TestInstance(load="test")}, labels={} ) mocked_load.assert_not_called() AssetLoader()(example) mocked_load.assert_called_once() def test_load_recurses_labels(self): with patch.object(TestLabel, "load") as mocked_load: example = SequenceExample( instances={}, labels={"test": TestLabel(load="test")} ) mocked_load.assert_not_called() AssetLoader()(example) mocked_load.assert_called_once() def test_load_recurses_lists(self): with patch.object(TestLabel, "load") as mocked_load: example = SequenceExample( instances={}, labels={"test": [TestLabel(load="test")]} ) mocked_load.assert_not_called() AssetLoader()(example) mocked_load.assert_called_once() def test_load_recurses_sets(self): with patch.object(TestLabel, "load") as mocked_load: example = SequenceExample( instances={}, labels={"test": set([TestLabel(load="test")])} ) mocked_load.assert_not_called() AssetLoader()(example) mocked_load.assert_called_once() def test_load_recurses_dicts(self): with patch.object(TestLabel, "load") as mocked_load: example = SequenceExample( instances={}, labels={"test": {"child": TestLabel(load="test")}} ) mocked_load.assert_not_called() AssetLoader()(example) mocked_load.assert_called_once() def test_load_recurses_namedtuples_without_load(self): LoadlessNamedtuple = collections.namedtuple("LoadlessNamedtuple", ["instance"]) with patch.object(TestLabel, "load") as mocked_load: example = SequenceExample( instances={}, labels={"test": LoadlessNamedtuple(instance=TestLabel(load="test"))}, ) mocked_load.assert_not_called() AssetLoader()(example) mocked_load.assert_called_once() def test_does_not_recurse_into_namedtuples_with_load(self): # We consider namedtuples with an "load" method to be a leaf node and stop recursion # when we encounter one. 
TestLabelWithInstance = collections.namedtuple( "TestLabelWithInstance", ["load", "instance"] ) with patch.object(TestLabelWithInstance, "load") as mocked_load, patch.object( TestInstance, "load" ) as mocked_instance_load: example = SequenceExample( instances={}, labels={ "test": { "child": TestLabelWithInstance( load="test", instance=TestInstance(load="test") ) } }, ) mocked_load.assert_not_called() mocked_instance_load.assert_not_called() AssetLoader()(example) mocked_load.assert_called_once() mocked_instance_load.assert_not_called()
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/asset_loader_test.py
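The tests above pin down a traversal contract: recurse through dicts, lists, sets, and plain namedtuples, but treat any namedtuple exposing a load attribute as a leaf. A simplified standalone re-implementation of that rule (not the library's AssetLoader, and assuming leaves expose a callable load()) could look like:

# Sketch of the traversal rule the tests assert: recurse into containers,
# but stop at namedtuples that expose `load` and call it exactly once.
def _load_assets(value):
    if hasattr(value, "_fields"):  # namedtuple
        if hasattr(value, "load") and callable(value.load):
            return value.load()
        # Namedtuple without a load method: recurse into its fields.
        return type(value)(*[_load_assets(v) for v in value])
    if isinstance(value, dict):
        return {k: _load_assets(v) for k, v in value.items()}
    if isinstance(value, (list, set)):
        return type(value)(_load_assets(v) for v in value)
    return value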
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for applying random Gaussian blurring augmentation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.filter2d_processor import (
    Filter2DProcessor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.gaussian_kernel import (
    gaussian_kernel,
)
from nvidia_tao_tf1.core.coreobject import save_args


class RandomGaussianBlur(Filter2DProcessor):
    """GaussianBlur processor that randomly blurs images."""

    @save_args
    def __init__(
        self, min_filter_size=2, max_filter_size=5, max_stddev=None, probability=None
    ):
        """Construct a RandomGaussianBlur processor.

        Args:
            min_filter_size (int): The minimum filter size of a Gaussian blur filter.
            max_filter_size (int): The maximum filter size of a Gaussian blur filter.
            max_stddev (float): The maximum standard deviation of the Gaussian blur
                filter's shape.
            probability (float): Probability at which blurring occurs.
        """
        super(RandomGaussianBlur, self).__init__()

        # Set filter_size.
        if min_filter_size < 0:
            raise ValueError(
                "RandomGaussianBlur.min_filter_size ({}) must be a non-negative "
                "integer.".format(min_filter_size)
            )
        if max_filter_size < 0:
            raise ValueError(
                "RandomGaussianBlur.max_filter_size ({}) must be a non-negative "
                "integer.".format(max_filter_size)
            )
        if min_filter_size > max_filter_size:
            raise ValueError(
                "RandomGaussianBlur.min_filter_size ({}) must not be larger than "
                "RandomGaussianBlur.max_filter_size ({}).".format(
                    min_filter_size, max_filter_size
                )
            )
        self._min_filter_size = min_filter_size
        self._max_filter_size = max_filter_size
        self._max_stddev = max_stddev

        # Set probability.
        if probability < 0.0 or probability > 1.0:
            raise ValueError(
                "RandomGaussianBlur.probability ({}) is not within the range "
                "[0.0, 1.0].".format(probability)
            )
        self._probability = probability

    @property
    def probability(self):
        """Probability to apply filters."""
        return self._probability

    def get_filters(self):
        """Return a list of filters.

        Because the filter is separable, each element contains a decomposed filter.
        """
        # Get filter_size.
        filter_size = tf.random.uniform(
            minval=self._min_filter_size,
            maxval=self._max_filter_size + 1,
            dtype=tf.int32,
            shape=[],
        )
        # Set stddev.
        if self._max_stddev is None:
            # Set stddev if not specified.
            stddev = (
                tf.multiply(
                    tf.multiply(tf.cast(filter_size, tf.float32) - 1, 0.5) - 1, 0.3
                )
                + 0.8
            )
        else:
            stddev = tf.random.uniform(
                minval=0, maxval=self._max_stddev, dtype=tf.float32, shape=[]
            )
        # Get gaussian_kernel.
        gaussian_filter_list = []
        # _gaussian_kernel is a tensor with size [filter_size, 1].
        _gaussian_kernel = gaussian_kernel(filter_size, 0.0, stddev)
        gaussian_filter_list.append(_gaussian_kernel)
        gaussian_filter_list.append(tf.reshape(_gaussian_kernel, [1, filter_size]))

        return gaussian_filter_list
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_gaussian_blur.py
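When max_stddev is None, get_filters above derives the standard deviation from the sampled filter size; the expression matches the formula OpenCV documents for getGaussianKernel. A plain-Python restatement as a sanity check:

# Default stddev rule used above when max_stddev is None:
#     sigma = 0.3 * ((filter_size - 1) * 0.5 - 1) + 0.8
def default_sigma(filter_size):
    return 0.3 * ((filter_size - 1) * 0.5 - 1) + 0.8

assert abs(default_sigma(5) - 1.1) < 1e-9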
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Class for mapping objects to output unique instance ids.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors import sparse_generators from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel from nvidia_tao_tf1.core.coreobject import save_args from nvidia_tao_tf1.core.processors import Processor class InstanceMapper(Processor): """InstanceMapper maps instance labels to instance ids.""" @save_args def __init__(self, default_has_instance, default_instance_id, exceptions, **kwargs): """Construct an InstanceMapper. Args default_has_instance (bool): The default hasInstance flag. To be applied if class name contains no substring in exceptions. default_instance_id (int): The default instance id. To be applied if class does not have instances by definition. exceptions(set of strings): Class will be excluded from default instance id assignment if class name contains any substring in this set. """ self._default_has_instance = default_has_instance self._default_instance_id = default_instance_id self._exception_set = exceptions super(InstanceMapper, self).__init__(**kwargs) def call(self, polygon_2d_label): """Map text class and attribute names to numeric ids. Args: polygon_2d_label (Polygon2DLabel): A label containing 2D polygons and their associated classes and attributes. If a 2D polygon captures a complete instance, its attribute is empty. If an instance consists of multiple polygons, these polygons will have a common attribute. Attributes are unique (unless empty) within class. Returns: (Polygon2DLabel): The label with the classes and attributes mapped to unique numeric id for each instance. """ # Classes are stored in a 4D tensor of shape [B, T, S, C], where # B=Batch (example within batch), T=Time step, S=Shape, C=Class(always 1) classes = polygon_2d_label.classes # Attributes are stored in a 4D tensor of shape [B, T, S, A ], where # B=Batch (example within batch), T=Time step, S=Shape, A=Attribute # 0 or more attributes per shape attributes = polygon_2d_label.attributes # Want to match all indices other than the class/attribute index. index_prefix_size = 3 def _mapper(class_values, class_indices, attribute_values, attributes_indices): # Initiate a dictionary to track occluded instances and assign id if instance exists. mapped_objects = {} # Initiate id counter. instance_id_counter = self._default_instance_id + 1 instance_ids = [] current_batch_index = current_frame_index = 0 for ( _, class_name, class_name_index, attribute_names, _, ) in sparse_generators.matching_indices_generator( index_prefix_size, class_values, class_indices, attribute_values, attributes_indices, ): # Clear id dictionary and reset instance id counter for every new frame. 
                if (
                    current_batch_index != class_name_index[0][0]
                    or current_frame_index != class_name_index[0][1]
                ):
                    mapped_objects.clear()
                    current_batch_index = class_name_index[0][0]
                    current_frame_index = class_name_index[0][1]
                    instance_id_counter = self._default_instance_id + 1
                if len(class_name) != 1:
                    print(class_name, end=" ")
                    print(
                        "Polygon is tagged with more than one class. "
                        "Proceeding with the first one."
                    )
                class_name = class_name[0].strip().lower().decode()
                _hasInstance = self._default_has_instance
                if any(elem in class_name for elem in self._exception_set):
                    _hasInstance = not _hasInstance
                # Only update polygons whose hasInstance is True by definition;
                # otherwise the default id is assigned.
                if not _hasInstance:
                    instance_ids.append(self._default_instance_id)
                    continue
                if attribute_names == []:
                    instance_ids.append(instance_id_counter)
                    instance_id_counter += 1
                else:
                    # Only polygons in the same frame with the same class name and
                    # attribute are given an identical instance id.
                    _object_key = "{}_{}".format(
                        class_name, attribute_names[0].strip().lower()
                    )
                    if _object_key not in mapped_objects:
                        mapped_objects[_object_key] = instance_id_counter
                        instance_id_counter += 1
                    instance_ids.append(mapped_objects[_object_key])
            return np.array(instance_ids, dtype=np.int32)

        mapped_ids = tf.compat.v1.py_func(
            _mapper,
            [classes.values, classes.indices, attributes.values, attributes.indices],
            tf.int32,
            stateful=False,
        )

        return Polygon2DLabel(
            vertices=polygon_2d_label.vertices,
            classes=tf.SparseTensor(
                values=mapped_ids,
                indices=classes.indices,
                dense_shape=classes.dense_shape,
            ),
            attributes=tf.SparseTensor(
                values=attributes.values,
                indices=attributes.indices,
                dense_shape=attributes.dense_shape,
            ),
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/instance_mapper.py
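A hypothetical construction sketch for the processor above; the parameter values are illustrative. Class names containing any substring in exceptions get the opposite of default_has_instance, and polygons without instances all receive default_instance_id.

from nvidia_tao_tf1.blocks.multi_source_loader.processors.instance_mapper import (
    InstanceMapper,
)

# Illustrative values: "road"-like classes are flipped to instance-less and
# share the default id -1; everything else gets a fresh id per instance.
mapper = InstanceMapper(
    default_has_instance=True,
    default_instance_id=-1,
    exceptions={"road", "background"},
)
# mapped = mapper(polygon_2d_label)  # Polygon2DLabel in, Polygon2DLabel out.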
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DriveNet legacy mapper for occlusion and truncation labels. The raison d'etre for this Processor is for DriveNet consumers of ``Bbox2DLabel`` instances to maintain their interpretation of the occlusion and truncation fields that were inherited from lossy TFRecords conversions. It also removes the need to adapt the class mapping part of a DriveNet spec (see below why). This allows other (potentially new) consumers of the SqliteDataSource requesting 'BOX' type labels to consume ``Bbox2DLabel`` instances _without_ these lossy mappings applied (i.e. pretty much what comes out of HumanLoop). The lossy mappings that are applied concern: * truncation --> truncation_type. * occlusion For historical reasons as well, class names had whitespaces replaced with underscores. This is also taken care of here. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import modulus.dataloader.humanloop_sqlite_dataset as hl_sql class DriveNetLegacyMapper(hl_sql.FeatureProcessor): """DriveNet legacy mapper for occlusion and truncation labels.""" # 0 = fully visible (or unknown), 1 = bottom occluded, # 2 = width occluded, 3 = bottom and width occluded. # Note: this follows the json converter convention of mapping 'unknown' to fully visible. OCCLUSION_MAPPING = { "unknown": 0, "full": 0, "bottom": 1, "width": 2, "bottomWidth": 3, } # 0 = not truncated (or unknown), 1 = left/right/left&right truncated, # 2 = bottom truncated, 3 = bottom & (left/right/left&right) truncated. TRUNCATION_MAPPING = { "unknown": 0, "full": 0, "bottom": 1, "left": 2, "right": 2, "leftRight": 2, "bottomLeft": 3, "bottomRight": 3, "bottomLeftRight": 3, } def add_fields(self, example): """Replace fields with int32 versions.""" example.labels["BOX"]["mapped_occlusion"] = hl_sql.create_derived_field( tf.int32, shape=None ) example.labels["BOX"]["mapped_truncation"] = hl_sql.create_derived_field( tf.int32, shape=None ) def filter(self, example_col_idx, dtype, row): """No filtering.""" return True def map(self, example_col_idx, dtype, row): """Do the label mappings.""" label_idx = example_col_idx.labels if dtype == "BOX": occlusion = row[label_idx["BOX"]["occlusion"]] occlusion = self.OCCLUSION_MAPPING.get( occlusion, self.OCCLUSION_MAPPING["unknown"] ) row[label_idx["BOX"]["mapped_occlusion"]] = occlusion truncation = row[label_idx["BOX"]["truncation"]] truncation = self.TRUNCATION_MAPPING.get( truncation, self.TRUNCATION_MAPPING["unknown"] ) row[label_idx["BOX"]["mapped_truncation"]] = truncation classifier = row[label_idx["BOX"]["classifier"]] classifier = classifier.replace(" ", "_") row[label_idx["BOX"]["classifier"]] = classifier return row
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/drivenet_legacy_mapper.py
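A standalone illustration of the lossy occlusion mapping above: raw values missing from the table fall back to the 'unknown' bucket, exactly as DriveNetLegacyMapper.map does.

# Same table and fallback behavior as DriveNetLegacyMapper.OCCLUSION_MAPPING.
OCCLUSION_MAPPING = {"unknown": 0, "full": 0, "bottom": 1, "width": 2, "bottomWidth": 3}

def map_occlusion(raw):
    # Unrecognized raw values map to the 'unknown' bucket (fully visible).
    return OCCLUSION_MAPPING.get(raw, OCCLUSION_MAPPING["unknown"])

assert map_occlusion("bottomWidth") == 3
assert map_occlusion("not-a-known-value") == 0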
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomBrightness processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from mock import patch from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_brightness import ( RandomBrightness, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object from modulus.processors.augment.color import random_brightness_matrix class TestRandomBrightness(ProcessorTestCase): @patch( "modulus.processors.augment.color.random_brightness_matrix", side_effect=random_brightness_matrix, ) def test_delegates_to_random_brightness_matrix( self, spied_random_brightness_matrix ): example = self.make_example_128x240() augmentation = RandomBrightness(scale_max=0.5, uniform_across_channels=False) augmentation.process(example) spied_random_brightness_matrix.assert_called_with( brightness_scale_max=0.5, brightness_uniform_across_channels=False, batch_size=None, ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomBrightness(scale_max=0.5, uniform_across_channels=False) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_brightness_test.py
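A hypothetical construction sketch mirroring the test above, with argument values taken from the test itself; scale_max bounds the random brightness shift and uniform_across_channels=False lets each channel draw its own offset.

from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_brightness import (
    RandomBrightness,
)

augmentation = RandomBrightness(scale_max=0.5, uniform_across_channels=False)
# processed = augmentation.process(example)  # Example in, Example out.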
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for rasterizing labels.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nvidia_tao_tf1.core.coreobject import TAOObject, save_args from nvidia_tao_tf1.core.processors import SparsePolygonRasterizer as TAOPolygonRasterizer class PolygonRasterizer(TAOObject): """Processor that rasterizes labels.""" @save_args def __init__( self, height, width, nclasses, one_hot=False, binarize=False, converters=None, include_background=True, ): """ Construct a PolygonRasterizer processor. Args: height (int): Absolute height to rasterize at. width (int): Absolute width to rasterize at. nclasses (int): Number of distinct classes that labels can have. one_hot (Boolean): When True, rasterization produces rasters with class_count + 1 output channels. Channel at index N contains rasterized labels for class whose class id is N-1. 0th channel is reserved for background (no labels). When false, a single channel raster is generated with each pixel having a value one greater than the class id of the label it represents. Background where no label is present is represented with pixel value 0. binarize (Boolean): When one_hot is true, setting binarize=false will allow values between 0 and 1 to appear in rasterized labels. include_background (bool): If set to true, the rasterized output would also include the background channel at channel index=0. This parameter only takes effect when `one_hot` parameter is set to `true`. Default `true`. Raises: ValueError: When invalid or incompatible arguments are provided. """ super(PolygonRasterizer, self).__init__() if height < 1: raise ValueError("height: {} is not a positive number.".format(height)) if width < 1: raise ValueError("width: {} is not a positive number.".format(width)) self.converters = converters or [] self._rasterize = TAOPolygonRasterizer( width=width, height=height, nclasses=nclasses, one_hot=one_hot, binarize=binarize, data_format="channels_first", include_background=include_background, ) @property def one_hot(self): """Whether or not this rasterizer is using one hot encoding.""" return self._rasterize.one_hot def process(self, labels2d): """ Rasterize sparse tensors. Args: labels2d (Polygon2DLabel): A label containing 2D polygons/polylines and their associated classes and attributes. The first two dimensions of each tensor that this structure contains should be batch/example followed by a frame/time dimension. The rest of the dimensions encode type specific information. See Polygon2DLabel documentation for details Returns: (tf.Tensor): A dense tensor of shape [B, F, C, H, W] and type tf.float32. Note that the number of frames F must currently be the same for each example. 
""" for converter in self.converters: labels2d = converter.process(labels2d) dense_shape = labels2d.vertices.coordinates.dense_shape if self._rasterize.one_hot: channel_count = ( (self._rasterize.nclasses + 1) if self._rasterize.include_background else self._rasterize.nclasses ) else: channel_count = 1 def _no_empty_branch(): compressed_labels = labels2d.compress_frame_dimension() compressed_coordinates = compressed_labels.vertices.coordinates canvas_shape = labels2d.vertices.canvas_shape canvas_height = canvas_shape.height[0].shape.as_list()[-1] canvas_width = canvas_shape.width[0].shape.as_list()[-1] raster_width_factor = tf.cast( self._rasterize.width / canvas_width, tf.float32 ) raster_height_factor = tf.cast( self._rasterize.height / canvas_height, tf.float32 ) scale = tf.linalg.tensor_diag([raster_width_factor, raster_height_factor]) vertices_2d = tf.reshape(compressed_coordinates.values, [-1, 2]) scaled_vertices_2d = tf.matmul(vertices_2d, scale) compressed_coordinates = tf.SparseTensor( indices=compressed_coordinates.indices, values=tf.reshape(scaled_vertices_2d, [-1]), dense_shape=compressed_coordinates.dense_shape, ) compressed_rasterized = self._rasterize( polygons=compressed_coordinates, class_ids_per_polygon=compressed_labels.classes, ) uncompressed_rasterized = tf.reshape( compressed_rasterized, [ dense_shape[0], dense_shape[1], channel_count, self._rasterize.height, self._rasterize.width, ], ) return uncompressed_rasterized def _empty_branch(): return tf.zeros( [ dense_shape[0], dense_shape[1], channel_count, self._rasterize.height, self._rasterize.width, ], dtype=tf.float32, ) # This is to deal with the situation that the tf.sparse.reshape will throw an error when # labels2d.vertices.coordinates is an empty tensor. # This happens when all images in this batch have no polygons. rasterized = tf.cond( pred=tf.equal( tf.size(input=labels2d.vertices.coordinates.indices), tf.constant(0) ), true_fn=_empty_branch, false_fn=_no_empty_branch, ) batch_size = labels2d.vertices.canvas_shape.height.shape[0] max_timesteps_in_example = labels2d.vertices.coordinates.shape[1] rasterized.set_shape( [ batch_size, max_timesteps_in_example, channel_count, self._rasterize.height, self._rasterize.width, ] ) return rasterized
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/polygon_rasterizer.py
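The channel count of the raster produced by process() above follows a simple rule; a plain-Python restatement with the same semantics:

# Output channel rule implemented in PolygonRasterizer.process() above.
def raster_channels(nclasses, one_hot, include_background=True):
    if not one_hot:
        return 1  # single raster: pixel value = class id + 1, 0 = background
    return nclasses + 1 if include_background else nclasses

assert raster_channels(3, one_hot=True) == 4
assert raster_channels(3, one_hot=False) == 1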
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for SparseToDensePolyline processor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from parameterized import parameterized

from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import (
    ProcessorTestCase,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.sparse_to_dense_polyline import (
    SparseToDensePolyline,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel
from nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures import (
    make_coordinates2d,
    make_tags,
)


def make_labels(
    shapes_per_frame,
    height,
    width,
    coordinates_per_polygon,
    coordinate_values,
    classes,
    attributes,
):
    return Polygon2DLabel(
        vertices=make_coordinates2d(
            shapes_per_frame=shapes_per_frame,
            height=height,
            width=width,
            coordinates_per_polygon=coordinates_per_polygon,
            coordinate_values=coordinate_values,
        ),
        classes=make_tags(classes),
        attributes=make_tags(attributes),
    )


class TestSparseToDensePolyline(ProcessorTestCase):
    @parameterized.expand(
        [
            # Input to check if the output is computed correctly for vertices
            # having adjacent x axis values.
            [
                [[1]],  # Shapes per frame of Input.
                [[[[0]]]],  # Classes.
                [[[[1]]]],  # Attributes.
                2,  # Input Coordinates per polygon.
                [[1.0, 1.0], [2.0, 3.0]],  # Input Coordinate values.
                [[1]],  # Output shapes per frame.
                [[[[0]]]],  # Output Classes.
                [[[[1]]]],  # Output Attributes.
                2,  # Output coordinates per polygon
                [[1.0, 1.0], [2.0, 3.0]],  # Output coordinate values.
            ],
            # Input to check if the output is computed correctly for vertices
            # with same x axis.
            [
                [[1]],  # Shapes per frame of Input.
                [[[[0]]]],  # Classes.
                [[[[1]]]],  # Attributes.
                2,  # Input Coordinates per polygon.
                [[1.0, 1.0], [1.0, 4.0]],  # Input Coordinate values.
                [[1]],  # Output shapes per frame.
                [[[[0]]]],  # Output Classes.
                [[[[1]]]],  # Output Attributes.
                4,  # Output coordinates per polygon
                [
                    [1.0, 1.0],
                    [1.0, 2.0],
                    [1.0, 3.0],
                    [1.0, 4.0],
                ],  # Output coordinate values.
            ],
            # Input to check if the output is computed correctly for vertices
            # with same x axis but reversed y values.
            [
                [[1]],  # Shapes per frame of Input.
                [[[[0]]]],  # Classes.
                [[[[1]]]],  # Attributes.
                2,  # Input Coordinates per polygon.
                [[1.0, 4.0], [1.0, 1.0]],  # Input Coordinate values.
                [[1]],  # Output shapes per frame.
                [[[[0]]]],  # Output Classes.
                [[[[1]]]],  # Output Attributes.
                4,  # Output coordinates per polygon
                [
                    [1.0, 4.0],
                    [1.0, 3.0],
                    [1.0, 2.0],
                    [1.0, 1.0],
                ],  # Output coordinate values.
            ],
            # Input to check if the op can compute dense vertices correctly
            # when multiple vertices are provided.
            [
                [[1]],  # Shapes per frame of Input.
                [[[[0]]]],  # Classes.
                [[[[1]]]],  # Attributes.
                3,  # Input Coordinates per polygon.
                [[1.0, 1.0], [4.0, 4.0], [7.0, 7.0]],  # Input Coordinate values.
                [[1]],  # Output shapes per frame.
                [[[[0]]]],  # Output Classes.
                [[[[1]]]],  # Output Attributes.
7, # Output coordinates per polygon [ [1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0], [6.0, 6.0], [7.0, 7.0], ], # Output coordinate values. ], # Input to check if the op can compute dense vertices for multiple # polylines in a frame. [ [[2]], # Shapes per frame of Input. [[[[0], [2]]]], # Classes. [[[[1], [0]]]], # Attributes. 3, # Input Coordinates per polygon. [ [1.0, 1.0], [4.0, 4.0], [7.0, 7.0], [8.0, 16.0], [10.0, 20.0], [14.0, 28.0], ], # Input Coordinate values. [[2]], # Output shapes per frame. [[[[0], [2]]]], # Output Classes. [[[[1], [0]]]], # Output Attributes. 7, # Output coordinates per polygon [ [1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0], [6.0, 6.0], [7.0, 7.0], [8.0, 16.0], [9.0, 18.0], [10.0, 20.0], [11.0, 22.0], [12.0, 24.0], [13.0, 26.0], [14.0, 28.0], ], # Output coordinate values. ], # Input to check if the op computes vertices provided with negative slope # correctly. [ [[2]], # Shapes per frame of Input. [[[[0], [2]]]], # Classes. [[[[1], [0]]]], # Attributes. 3, # Input Coordinates per polygon. [ [1.0, 1.0], [4.0, 4.0], [7.0, 7.0], [14.0, 28.0], [10.0, 20.0], [8.0, 16.0], ], # Input Coordinate values. [[2]], # Output shapes per frame. [[[[0], [2]]]], # Output Classes. [[[[1], [0]]]], # Output Attributes. 7, # Output coordinates per polygon [ [1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0], [5.0, 5.0], [6.0, 6.0], [7.0, 7.0], [14.0, 28.0], [13.0, 26.0], [12.0, 24.0], [11.0, 22.0], [10.0, 20.0], [9.0, 18.0], [8.0, 16.0], ], # Output coordinate values. ], ] ) def test_sparse_to_dense_polyline( self, shapes_per_frame, classes, attributes, coordinates_per_polygon, coordinate_values, expected_shapes_per_frame, expected_classes, expected_attributes, expected_coordinates_per_polygon, expected_coordinate_values, ): with self.session() as session: dense_polyline_converter = SparseToDensePolyline() input_labels = make_labels( shapes_per_frame, 10, 10, coordinates_per_polygon, coordinate_values, classes, attributes, ) expected_labels = make_labels( expected_shapes_per_frame, 10, 10, expected_coordinates_per_polygon, expected_coordinate_values, expected_classes, expected_attributes, ) output_labels = dense_polyline_converter(input_labels) input_labels, output_labels, expected_labels = session.run( [input_labels, output_labels, expected_labels] ) output_vertices_indices = output_labels.vertices.coordinates.indices output_vertices_values = output_labels.vertices.coordinates.values output_vertices_shape = output_labels.vertices.coordinates.dense_shape expected_vertices_indices = expected_labels.vertices.coordinates.indices expected_vertices_values = expected_labels.vertices.coordinates.values expected_vertices_shape = expected_labels.vertices.coordinates.dense_shape # Check if the output equals the expected self.assertAllEqual(output_vertices_indices, expected_vertices_indices) self.assertAllEqual(output_vertices_values, expected_vertices_values) self.assertAllEqual(output_vertices_shape, expected_vertices_shape)
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/sparse_to_dense_polyline_test.py
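A pure-NumPy sketch of the per-segment densification the fixtures above expect, assuming the op inserts a vertex at every unit step between consecutive polyline points (stepping along y for vertical segments). It mirrors the [1, 1] -> [4, 4] -> [7, 7] case, and is not the library kernel itself.

import numpy as np

def densify_segment(p0, p1):
    (x0, y0), (x1, y1) = p0, p1
    if x0 == x1:  # Vertical segment: step along y instead of x.
        step = 1 if y1 >= y0 else -1
        return [[x0, float(y)] for y in np.arange(y0, y1 + step, step)]
    step = 1 if x1 >= x0 else -1
    xs = np.arange(x0, x1 + step, step)
    # Linear interpolation of y at every integer x between the endpoints.
    ys = y0 + (xs - x0) * (y1 - y0) / (x1 - x0)
    return [[float(x), float(y)] for x, y in zip(xs, ys)]

assert densify_segment((1.0, 1.0), (4.0, 4.0)) == [[1, 1], [2, 2], [3, 3], [4, 4]]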
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pipelines consist of chained-together processors."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import logging

from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST
from nvidia_tao_tf1.blocks.multi_source_loader.processors.convert_data_format import (
    ConvertDataFormat,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    SequenceExample,
    TransformedExample,
)
from nvidia_tao_tf1.core.coreobject import TAOObject, save_args

logger = logging.getLogger(__name__)


def _compose(previous_processors, next_processor):
    """
    Compose the last processor in a sequence with the next one if possible.

    Args:
        previous_processors (list of Processors): Processors that have already been
            composed, or None when nothing has been composed yet.
        next_processor (Processor): Processor to attempt composing.

    Returns:
        (list of Processors): Previous processors combined with the next processor.
    """
    if not previous_processors:
        # Nothing to compose with
        return [next_processor]

    # Compose the last processor with the next one if possible.
    candidate = previous_processors[-1]
    if candidate.can_compose(next_processor):
        return previous_processors[:-1] + [candidate.compose(next_processor)]

    # Could not compose, add to existing list of processors.
    return previous_processors + [next_processor]


def _compose_processors(processors):
    """
    Compose all consecutive composable processors, keeping non-composable processors intact.

    Args:
        processors (list of Processors): Processors to attempt composing.

    Returns:
        (list of Processors): List of processors that will be identical to the input
            list of processors when nothing could be composed. If composition was
            possible, the list will be shorter than the input list. When all processors
            were composable, the list will contain only one processor.
    """
    return functools.reduce(_compose, processors, [])


class Pipeline(TAOObject):
    """Configurable sequence of processors for transforming inputs (images, labels)."""

    @save_args
    def __init__(
        self,
        processors,
        input_data_format=CHANNELS_FIRST,
        output_data_format=CHANNELS_FIRST,
        compose=True,
    ):
        """
        Construct a pipeline.

        The passed-in processors will be processed in sequence and conversions between
        data formats will be done automatically based on the format of individual
        processors and the desired input and output data formats.

        Args:
            processors (list of `Processor`): Sequence of processors to pass inputs
                through.
            input_data_format (DataFormat): Data format of input examples.
            output_data_format (DataFormat): Data format to use for output examples.
            compose (Boolean): If True, pipeline will attempt to compose consecutive
                processors.
""" super(Pipeline, self).__init__() self._processors = processors self._output_data_format = output_data_format self._input_data_format = input_data_format self._compose = compose self._is_built = False def _build(self): # Combine/compose consecutive processors when possible to reduce compute. processors = self._processors if self._compose: processors = _compose_processors(processors) # Add conversions that automatically handle NCHW <-> NHWC incompatibilities between # processors. self._processors = self._inject_conversion_processors( processors, self._input_data_format, self._output_data_format ) self._is_built = True def __len__(self): """Return the number of processors in this pipeline.""" return len(self._processors) def __add__(self, other): """Combine two pipelines.""" assert not self._is_built, "Combining built pipelines is not supported" return Pipeline( self._processors + other._processors, self._input_data_format, self._output_data_format, ) def __iter__(self): """Iterate over all processors in this pipeline.""" for processor in self._processors: yield processor def __repr__(self): """Return a string representation of this pipeline.""" return "Pipeline([{}], input_data_format={}, output_data_format={}, compose={}".format( ",".join([repr(processor) for processor in self._processors]), self._input_data_format, self._output_data_format, self._compose, ) def __getitem__(self, index): """Get processor at given location. Because conversion processors can be automatically inserted between other processors, the processor is not necessarily the same as would be returned by indexing into the list of processors passed to the constructor of this class. Args: index (int): Index of the processor to return. Returns: processor (Processor): Processor at the given index. """ return self._processors[index] def _inject_conversion_processors(self, processors, input_format, output_format): """Inject data format conversion processors in between processors.""" processors_with_conversions = [] previous_output_format = input_format # Naively insert data format conversions. TODO(vkallioniemi): make this minimize the # number of conversions done (build a tree, search for shortest path.) for i, processor in enumerate(processors): if previous_output_format not in processor.supported_formats: next_data_format = processor.supported_formats[0] logger.warning( "inserting conversion after processor %d: %s -> %s", i, previous_output_format, next_data_format, ) conversion = ConvertDataFormat(previous_output_format, next_data_format) processors_with_conversions.append(conversion) previous_output_format = next_data_format processor.data_format = previous_output_format processors_with_conversions.append(processor) if previous_output_format != output_format: logger.warning( "inserting conversion at the end of the pipeline: %s -> %s", previous_output_format, output_format, ) conversion = ConvertDataFormat(previous_output_format, output_format) processors_with_conversions.append(conversion) return processors_with_conversions def process(self, examples): """ Process examples by letting all processors process them in sequence. Args: examples (List of `Example`s): Input data with frames in input_data_format. Returns: (List of `Example`s): Processed examples with frames in output_data_format. Raises: ValueError: When passed invalid arguments. 
""" if not examples: raise ValueError("Pipeline.process called with 0 arguments.") processed_examples = [] for example in examples: processed_example = self(example) processed_examples.append(processed_example) return processed_examples def __call__(self, example): """ Process example by letting all processors process it in sequence. Args: example (Example): Input data with frames in input_data_format. Returns: (Example): Processed example with frames in output_data_format. Raises: ValueError: When passed invalid arguments. """ if not self._is_built: self._build() processed_example = example for processor in self._processors: # SequenceExamples are always channels_first if isinstance(processor, ConvertDataFormat) and isinstance( processed_example, (SequenceExample, TransformedExample) ): continue processed_example = processor.process(processed_example) return processed_example
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/pipeline.py
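A toy illustration of the _compose fold defined above: consecutive composable processors collapse pairwise, left to right, via functools.reduce. The Add class is a stand-in for real processors.

import functools

class Add:
    """Stand-in processor whose composition just sums the payloads."""
    def __init__(self, n):
        self.n = n
    def can_compose(self, other):
        return isinstance(other, Add)
    def compose(self, other):
        return Add(self.n + other.n)

def _compose(prev, nxt):
    # Same shape as the fold step above: merge into the last element if possible.
    if prev and prev[-1].can_compose(nxt):
        return prev[:-1] + [prev[-1].compose(nxt)]
    return (prev or []) + [nxt]

composed = functools.reduce(_compose, [Add(1), Add(2), Add(3)], [])
assert len(composed) == 1 and composed[0].n == 6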
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomTranslation processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from mock import patch from parameterized import parameterized from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_translation import ( RandomTranslation, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object from modulus.processors.augment.spatial import random_translation_matrix def _esc(message): """Escape passed in string for regular expressions.""" return re.escape(message) class TestRandomTranslation(ProcessorTestCase): @parameterized.expand( [ [ -0.1, _esc( "RandomTranslation.probability (-0.1) is not within the range [0.0, 1.0]." ), ], [ 1.1, _esc( "RandomTranslation.probability (1.1) is not within the range [0.0, 1.0]." ), ], ] ) def test_raises_on_invalid_probability(self, probability, message): with self.assertRaisesRegexp(ValueError, message): RandomTranslation(max_x=7, max_y=7, probability=probability) @patch( "modulus.processors.augment.spatial.random_translation_matrix", side_effect=random_translation_matrix, ) def test_delegates_to_random_translation_matrix( self, spied_random_translation_matrix ): example = self.make_example_128x240() augmentation = RandomTranslation(max_x=90, max_y=45, probability=1.0) augmentation.process(example) spied_random_translation_matrix.assert_called_with( max_x=90, max_y=45, batch_size=None ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomTranslation(max_x=90, max_y=45, probability=1.0) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_translation_test.py
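A hypothetical construction sketch mirroring the test above, with argument values taken from the test itself; probability outside [0.0, 1.0] raises ValueError, as the parameterized cases assert.

from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_translation import (
    RandomTranslation,
)

augmentation = RandomTranslation(max_x=90, max_y=45, probability=1.0)
# processed = augmentation.process(example)  # Example in, Example out.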
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for LabelAdjustment processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from mock import Mock import numpy as np from parameterized import parameterized import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST from nvidia_tao_tf1.blocks.multi_source_loader.processors.label_adjustment import ( LabelAdjustment, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Canvas2D, Coordinates2D, Example, FEATURE_CAMERA, LABEL_MAP, Polygon2DLabel, SequenceExample, ) import nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures as fixtures from nvidia_tao_tf1.core.coreobject import deserialize_tao_object def make_sequence_example(coordinates): frames = tf.ones((1, 128, 240, 3)) coordinates_2d = Coordinates2D( coordinates=coordinates, canvas_shape=Canvas2D(height=0, width=0) ) polygon_label_2d = Polygon2DLabel( vertices=coordinates_2d, classes=fixtures.make_tags([[[[1]]]]), attributes=fixtures.make_tags([[[[1]]]]), ) return SequenceExample( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon_label_2d} ) class TestLabelAdjustment(ProcessorTestCase): @parameterized.expand( [ [0, 2, 4, re.escape("Scale: 0 is not positive.")], [-1, 2, 4, re.escape("Scale: -1 is not positive.")], [1, -1, 4, re.escape("Translation x: -1 cannot be a negative number.")], [1, 2, -1, re.escape("Translation y: -1 cannot be a negative number.")], ] ) def test_raises_on_invalid_arguments( self, scale, translation_x, translation_y, message ): with self.assertRaisesRegexp(ValueError, message): LabelAdjustment( scale=scale, translation_x=translation_x, translation_y=translation_y ) def test_supports_channels_first(self): label_adjustment = LabelAdjustment() assert label_adjustment.supported_formats == [CHANNELS_FIRST] def test_does_not_compose(self): label_adjustment = LabelAdjustment() assert label_adjustment.can_compose(Mock()) is False def test_compose_raises(self): with self.assertRaises(NotImplementedError): label_adjustment = LabelAdjustment() label_adjustment.compose(Mock()) def test_adjust_scale(self): frames = tf.ones((1, 128, 240, 3)) labels = self.make_polygon_label( vertices=[[60, 32], [180, 32], [180, 96], [60, 96]] ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: labels} ) expected_labels = self.make_polygon_label( vertices=[[30, 16], [90, 16], [90, 48], [30, 48]] ) with self.test_session(): label_adjustment = LabelAdjustment(scale=0.5) adjusted = label_adjustment.process(example) self.assert_labels_close(expected_labels, adjusted.labels[LABEL_MAP]) def test_adjust_translation(self): frames = tf.ones((1, 128, 240, 3)) polygon = self.make_polygon_label( vertices=[[60, 32], [180, 32], [180, 96], [60, 96]] ) example = Example( 
instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) expected_labels = self.make_polygon_label( vertices=[[40, 2], [160, 2], [160, 66], [40, 66]] ) with self.test_session(): label_adjustment = LabelAdjustment(translation_x=20, translation_y=30) adjusted = label_adjustment.process(example) self.assert_labels_close(expected_labels, adjusted.labels[LABEL_MAP]) def test_adjust_scale_and_translation(self): frames = tf.ones((1, 128, 240, 3)) polygon = self.make_polygon_label( vertices=[[60, 32], [180, 32], [180, 96], [60, 96]] ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) expected_labels = self.make_polygon_label( vertices=[[10, -14], [70, -14], [70, 18], [10, 18]] ) with self.test_session(): label_adjustment = LabelAdjustment( scale=0.5, translation_x=20, translation_y=30 ) adjusted = label_adjustment.process(example) self.assert_labels_close(expected_labels, adjusted.labels[LABEL_MAP]) def test_sequence_example_adjust_scale(self): coordinates = tf.SparseTensor( indices=[ [0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 0, 0, 1, 1], [0, 0, 0, 2, 0], [0, 0, 0, 2, 1], [0, 0, 0, 3, 0], [0, 0, 0, 3, 1], ], values=tf.constant([60, 32, 180, 32, 180, 96, 60, 96], dtype=tf.float32), dense_shape=[1, 1, 1, 4, 2], ) sequence_example = make_sequence_example(coordinates) expected_coordinates = tf.SparseTensor( indices=coordinates.indices, values=tf.constant([30, 16, 90, 16, 90, 48, 30, 48]), dense_shape=coordinates.dense_shape, ) with self.test_session(): label_adjustment = LabelAdjustment(scale=0.5) adjusted = label_adjustment.process(sequence_example) self.assertAllClose( expected_coordinates.eval(), adjusted.labels[LABEL_MAP].vertices.coordinates.eval(), rtol=1e-3, atol=1e-3, ) def test_sequence_example_adjust_translation(self): coordinates = tf.SparseTensor( indices=[ [0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 0, 0, 1, 1], [0, 0, 0, 2, 0], [0, 0, 0, 2, 1], [0, 0, 0, 3, 0], [0, 0, 0, 3, 1], ], values=tf.constant([60, 32, 180, 32, 180, 96, 60, 96], dtype=tf.float32), dense_shape=[1, 1, 1, 4, 2], ) sequence_example = make_sequence_example(coordinates) expected_coordinates = tf.SparseTensor( indices=coordinates.indices, values=tf.constant([40, 2, 160, 2, 160, 66, 40, 66]), dense_shape=coordinates.dense_shape, ) with self.test_session(): label_adjustment = LabelAdjustment(translation_x=20, translation_y=30) adjusted = label_adjustment.process(sequence_example) self.assertAllClose( expected_coordinates.eval(), adjusted.labels[LABEL_MAP].vertices.coordinates.eval(), rtol=1e-3, atol=1e-3, ) def test_sequence_example_adjust_scale_and_translation(self): coordinates = tf.SparseTensor( indices=[ [0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 0, 0, 1, 1], [0, 0, 0, 2, 0], [0, 0, 0, 2, 1], [0, 0, 0, 3, 0], [0, 0, 0, 3, 1], ], values=tf.constant([60, 32, 180, 32, 180, 96, 60, 96], dtype=tf.float32), dense_shape=[1, 1, 1, 4, 2], ) sequence_example = make_sequence_example(coordinates) expected_coordinates = tf.SparseTensor( indices=coordinates.indices, values=tf.constant([10, -14, 70, -14, 70, 18, 10, 18]), dense_shape=coordinates.dense_shape, ) with self.test_session(): label_adjustment = LabelAdjustment( scale=0.5, translation_x=20, translation_y=30 ) adjusted = label_adjustment.process(sequence_example) self.assertAllClose( expected_coordinates.eval(), adjusted.labels[LABEL_MAP].vertices.coordinates.eval(), rtol=1e-3, atol=1e-3, ) def test_sequence_example_adjust_scale_and_translation_empty(self): empty_indices = tf.zeros((0, 4), 
tf.int64) empty_values = tf.constant([], dtype=tf.float32) empty_dense_shape = [0, 0, 0, 0] empty_coordinates = tf.SparseTensor( indices=empty_indices, values=empty_values, dense_shape=empty_dense_shape ) sequence_example = make_sequence_example(empty_coordinates) with self.test_session(): label_adjustment = LabelAdjustment( scale=0.5, translation_x=20, translation_y=30 ) adjusted = label_adjustment.process(sequence_example) adjusted_coordinates = adjusted.labels[LABEL_MAP].vertices.coordinates adjusted_indices = adjusted_coordinates.indices adjusted_values = adjusted_coordinates.values adjusted_dense_shape = adjusted_coordinates.dense_shape # assertAllEqual seems to have problems with empty tensors so have to revert to this # manual checking. self.assertTrue( np.array_equal(empty_indices.eval(), adjusted_indices.eval()) ) self.assertTrue(np.array_equal(empty_values.eval(), adjusted_values.eval())) self.assertAllEqual(empty_dense_shape, adjusted_dense_shape.eval()) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" label_adjustment = LabelAdjustment( scale=0.5, translation_x=20, translation_y=30 ) label_adjustment_dict = label_adjustment.serialize() deserialized_label_adjustment = deserialize_tao_object(label_adjustment_dict) assert label_adjustment._scale == deserialized_label_adjustment._scale assert ( label_adjustment._translation_x == deserialized_label_adjustment._translation_x ) assert ( label_adjustment._translation_y == deserialized_label_adjustment._translation_y )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/label_adjustment_test.py
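The expected fixtures above are consistent with scaling vertices first and then subtracting the translation offsets; a sketch of that arithmetic (not the processor itself):

# v' = v * scale - (tx, ty), matching the test expectations above.
def adjust(x, y, scale=1.0, tx=0.0, ty=0.0):
    return x * scale - tx, y * scale - ty

assert adjust(60, 32, scale=0.5) == (30.0, 16.0)
assert adjust(60, 32, tx=20, ty=30) == (40.0, 2.0)
assert adjust(60, 32, scale=0.5, tx=20, ty=30) == (10.0, -14.0)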
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for rasterizing labels and optionally resizing frames."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_LAST
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import (
    Processor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Example,
    FEATURE_CAMERA,
    LABEL_MAP,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import PolygonRasterizer


class RasterizeAndResize(Processor):
    """Processor that rasterizes labels and optionally resizes frames to match raster size."""

    @save_args
    def __init__(
        self,
        height,
        width,
        class_count,
        one_hot=False,
        binarize=False,
        resize_frames=False,
        resize_method=tf.image.ResizeMethod.BILINEAR,
    ):
        """
        Construct a RasterizeAndResize processor.

        Args:
            height (int): Absolute height to rasterize at and optionally resize frames to.
            width (int): Absolute width to rasterize at and optionally resize frames to.
            class_count (int): Number of distinct classes that labels can have.
            one_hot (Boolean): When True, rasterization produces rasters with
                class_count + 1 output channels. Channel at index N contains rasterized
                labels for class whose class id is N-1. 0th channel is reserved for
                background (no labels). When false, a single channel raster is generated
                with each pixel having a value one greater than the class id of the
                label it represents. Background where no label is present is represented
                with pixel value 0.
            binarize (Boolean): When one_hot is true, setting binarize=false will allow
                values between 0 and 1 to appear in rasterized labels.
            resize_frames (Boolean): Whether to resize images/frames to match the raster
                size.
            resize_method (tf.image.ResizeMethod): Method used to resize frames.

        Raises:
            ValueError: When invalid or incompatible arguments are provided.
        """
        super(RasterizeAndResize, self).__init__()
        if height < 1:
            raise ValueError("height: {} is not a positive number.".format(height))
        if width < 1:
            raise ValueError("width: {} is not a positive number.".format(width))
        if resize_method not in [
            tf.image.ResizeMethod.BILINEAR,
            tf.image.ResizeMethod.NEAREST_NEIGHBOR,
            tf.image.ResizeMethod.BICUBIC,
            tf.image.ResizeMethod.AREA,
        ]:
            raise ValueError("Unrecognized resize_method: '{}'.".format(resize_method))

        self._height = height
        self._width = width
        self._resize_frames = resize_frames
        self._resize_method = resize_method

        self._rasterize = PolygonRasterizer(
            width=width,
            height=height,
            nclasses=class_count,
            one_hot=one_hot,
            binarize=binarize,
            data_format="channels_first",
        )

    @property
    def supported_formats(self):
        """Data formats supported by this processor."""
        return [CHANNELS_LAST]

    def can_compose(self, other):
        """
        Determine whether two processors can be composed into a single one.

        Args:
            other (Processor): Other processor instance.
        Returns:
            (Boolean): Always False - composition not supported.
        """
        return False

    def compose(self, other):
        """Compose two processors into a single one.

        Args:
            other (Processor): Other processor instance.

        Raises:
            NotImplementedError: Composition not supported.
        """
        raise NotImplementedError("Composition not supported.")

    def process(self, example):
        """
        Rasterize labels and optionally resize images/frames to match the raster size.

        The rasterized labels are stored in NHWC format.

        Args:
            example (Example): Example to rasterize.

        Returns:
            (Example): Example with resized frames and rasterized labels.
        """
        frames = example.instances[FEATURE_CAMERA]
        labels = example.labels[LABEL_MAP]

        height = frames.get_shape().as_list()[1]
        width = frames.get_shape().as_list()[2]

        if self._resize_frames:
            frames = tf.image.resize(
                frames, size=(self._height, self._width), method=self._resize_method
            )

        raster_width_factor = self._width / width
        raster_height_factor = self._height / height

        polygons = tf.matmul(
            labels.polygons,
            tf.constant([[raster_width_factor, 0.0], [0.0, raster_height_factor]]),
        )

        rasterized = self._rasterize(
            polygon_vertices=polygons,
            vertex_counts_per_polygon=labels.vertices_per_polygon,
            class_ids_per_polygon=labels.class_ids_per_polygon,
            polygons_per_image=None,
        )
        # Rasterizes to CHW, but this Processor is NHWC
        rasterized = tf.expand_dims(tf.transpose(a=rasterized, perm=[1, 2, 0]), axis=0)

        return Example(
            instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: rasterized}
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/rasterize_and_resize.py
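A NumPy restatement of the vertex scaling performed in process() above: multiplying [x, y] rows by a diagonal matrix rescales label coordinates from the source frame to the raster frame.

import numpy as np

polygons = np.array([[60.0, 32.0], [180.0, 96.0]])
scale = np.array([[0.5, 0.0], [0.0, 0.25]])  # width factor 0.5, height factor 0.25
assert (polygons @ scale == np.array([[30.0, 8.0], [90.0, 24.0]])).all()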
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Class for priors generation."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.polyline_clipper import (
    PolylineClipper,
)

COORDINATES_PER_POINT = 4
EDGES_PER_PATH = 2


class PriorsGenerator(object):
    """Class for priors generation.

    The functions in this class deal with programmatically generating the priors for
    path data, which is composed of arbitrary polygons specified by left and right edges
    (top and bottom edges are implied by the bottom and top coordinates of the left and
    right edges).

    The basic algorithm for generating priors proceeds with the following steps:
        (1) Find the center of the receptive fields for all neurons in the network layer
            at which you are generating priors.
        (2) Generate a set of npriors priors based on points and linear shapes in a
            normalized coordinate frame (centered at origin, spanning from -1 to 1).
            - There can only be 1 point prior per receptive field, located at the center
              of the receptive field.
            - There is only one linear shape: a horizontal line from -1 to 1.
            - Number of point and linear priors are specified in the experiment spec.
        (3) Copy and rotate the generated priors by a set of angles to produce a set of
            npriors priors.
        (4) Translate the priors to each receptive field center.
        (5) Scale the priors to match the aspect ratio of the image.
        (6) Verify that all points on the prior are within the image, and if not, clip
            and then interpolate such that the priors are within the image borders but
            still have points_per_prior points.
    """

    def __init__(
        self,
        npoint_priors,
        nlinear_priors,
        points_per_prior,
        prior_threshold,
        feature_map_sizes,
        image_height,
        image_width,
    ):
        """
        Initialize priors-related variables: the number of point and linear priors,
        points per prior, prior threshold, feature map sizes extracted from the model,
        and the image height and width.
""" if npoint_priors < 0: raise ValueError( "npoints_priors must be positive, it is {}.".format(npoint_priors) ) self.npoint_priors = npoint_priors if nlinear_priors < 0: raise ValueError( "nlinear_priors must be positive, it is {}.".format(nlinear_priors) ) self.nlinear_priors = nlinear_priors self.npriors = npoint_priors + nlinear_priors if self.npriors <= 0: raise ValueError("npriors must be > 0, it is {}.".format(self.npriors)) if points_per_prior <= 0: raise ValueError( "points_per_prior must be positive, not {}.".format(points_per_prior) ) self.points_per_prior = points_per_prior self.prior_threshold = prior_threshold self.path_width = 100.0 self.normalized_linear_prior_length = 0.625 self._polyline_clipper = PolylineClipper(self.points_per_prior) self.nall_priors = self._get_nall_priors(feature_map_sizes) self.priors = self._get_priors(feature_map_sizes, image_height, image_width) if self.nall_priors < 1: raise ValueError( "There must be at least one prior, instead {}.".format(self.nall_priors) ) if self.priors is None: raise ValueError("There is not any prior set.") def _transform_prior( self, origin, xs, ys, angle, scale_x=1.0, scale_y=1.0, tx=0.0, ty=0.0 ): """ Rotate, translate and scale points clockwise by a given angle around a given origin. Args: origin (scalar, scalar): Point about which to rotate the points. xs, ys (1D tensor, 1D tensor): x and y coordinates to be rotated. angle (scalar): Angle in radians to rotate the points by. scale_x (scalar): Multiplier for x coordinates. scale_y (scalar): Multiplier for y coordinates. tx (scalar): Amount to translate the points in the x-dimension. ty (scalar): Amount to translate the points in the y-dimension. Returns: newx, newy (1D tensor, 1D tensor): Rotated, translated and scaled points. """ ox, oy = tf.split(tf.cast(origin, tf.float32), [1, 1]) cos_angle = tf.cos(tf.cast(angle, tf.float32)) sin_angle = tf.sin(tf.cast(angle, tf.float32)) # Apply 2D rotation matrix with translation. newx = ox + cos_angle * (xs - ox) - sin_angle * (ys - oy) newy = oy + sin_angle * (xs - ox) + cos_angle * (ys - oy) # Scale and translate the resulting points. newx = scale_x * newx + tx newy = scale_y * newy + ty return newx, newy def _get_prior_locations( self, receptive_field_x, receptive_field_y, image_height, image_width ): """ Calculate the locations at which to place each set of priors. Args: receptive_field_x (int): Size of receptive field in the x-dimension (in pixels). receptive_field_y (int): Size of receptive field in the y-dimension (in pixels). image_height (int): Height of the image. image_width (int): Width of the image. Returns: prior_locations (matrix of floats): x, y locations of the center of the prior sets in the image (in pixels). """ prior_locations_x = np.arange( (receptive_field_x / 2.0), image_width + 1 - (receptive_field_x / 2.0), receptive_field_x, ) prior_locations_y = np.arange( (receptive_field_y / 2.0), image_height + 1 - (receptive_field_y / 2.0), receptive_field_y, ) prior_locations = np.meshgrid(prior_locations_x, prior_locations_y) return prior_locations def _generate_point_priors(self, x, y, image_height, image_width): """ Create the point priors. Priors are created by replicating the center point of the receptive field, tx and ty, points_per_priors times. This means that each path will be coded as an offset from the center of the receptive field. Args: x (float): Center of receptive field in x-dimension. y (float): Center of receptive field in y-dimension. image_height (int): Height of the image. 
image_width (int): Width of the image. Returns: priors (array of floats): Ordered as leftx, lefty, rightx, righty coordinates. """ norm_x = x * (1.0 / image_width) norm_y = y * (1.0 / image_height) # Convert to tensors and expand number of points. priors = tf.tile( tf.cast([[norm_x, norm_y]], tf.float32), [EDGES_PER_PATH * self.points_per_prior, 1], ) return priors def _generate_linear_priors( self, angles, scale_x, scale_y, tx, ty, image_height, image_width ): """ Create the linear priors. Priors are created by generating a horizontal line with points_per_prior points, then rotating, translating and scaling it by the amounts specified. Args: angles (array of floats): Angles to rotate the lines by, in radians. scale_x (scalar): Extent of priors in x-direction. scale_y (scalar): Extent of priors in y-direction. tx (scalar): Amount to translate the points in the x-dimension. ty (scalar): Amount to translate the points in the y-dimension. image_height (int): Height of the image. image_width (int): Width of the image. Returns: priors (array of floats): Ordered as leftx, lefty, rightx, righty coordinates. """ # Generate the base edge priors, which will then be modified based on the spec parameters. # The default prior is a horizontal vector that points to the left and is centered at # the origin. The angles that this vector will be rotated by are all positive and less # than 180 degrees and thus rotate the vector clockwise about the origin. We use a # horizontal left-pointing vector so that the ordering of the points is always from bottom # to top. That is, the vector always starts in the lower half-plane and ends in the upper # half-plane. base_xs = tf.linspace(1.0, -1.0, self.points_per_prior) base_ys = tf.zeros(self.points_per_prior, dtype=tf.float32) base_widthx = tf.constant([0.0, 0.0], dtype=tf.float32) base_widthy = tf.constant([0.0, (self.path_width / 2.0)], dtype=tf.float32) for edge in ["Left", "Right"]: # Create one prior for each rotation angle. prior_xs = [] prior_ys = [] for angle in angles: assert 0.0 <= angle <= np.pi, ( "angle to rotate the linear prior by is %.2f, " "which is outside the range 0 to pi." % angle ) # Rotate and scale the prior edges. imagex, imagey = self._transform_prior( (0, 0), base_xs, base_ys, angle, scale_x, scale_y, tx, ty ) widthx, widthy = self._transform_prior( (0, 0), base_widthx, base_widthy, angle ) # Convert from centerline priors to edge priors. sign = -1.0 if "Left" == edge else 1.0 image_x = imagex + sign * widthx[-1] image_y = imagey + sign * widthy[-1] prior_xs.append(image_x / image_width) prior_ys.append(image_y / image_height) zipped_priors = tf.stack( [ tf.reshape(tf.stack(prior_xs), [-1]), tf.reshape(tf.stack(prior_ys), [-1]), ], axis=1, ) # Clip the priors to the image boundaries. Use image_height and image_width values of 1 # as the priors are normalized above. Need to maintain the vertex number since all # priors are required to have the same number of vertices.
vertices_per_polyline = tf.constant( self.points_per_prior, shape=[len(angles)], dtype=tf.int32 ) polygon_mask = tf.constant( [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]], tf.float32 ) clipped_priors, clipped_polyline_index_map = self._polyline_clipper.clip( polylines=zipped_priors, vertices_per_polyline=vertices_per_polyline, class_ids_per_polyline=tf.zeros_like(vertices_per_polyline), attributes_per_polyline=tf.zeros_like(vertices_per_polyline), maintain_vertex_number=True, polygon_mask=polygon_mask, )[0:2] if "Left" == edge: left_clipped_priors = clipped_priors left_clipped_map = clipped_polyline_index_map else: right_clipped_priors = clipped_priors right_clipped_map = clipped_polyline_index_map # Keep only priors where both edges are present because clipping can remove one or # the other edge entirely. left_clipped_maps, right_clipped_maps = tf.meshgrid( left_clipped_map, right_clipped_map ) valid_priors = tf.compat.v1.where( tf.equal(left_clipped_maps, right_clipped_maps) ) right_valid_priors, left_valid_priors = tf.split(valid_priors, 2, axis=1) # Get the point indices associated with this prior. point_indices = tf.reshape( tf.cumsum(tf.ones([len(angles) * self.points_per_prior], tf.int32)) - 1, [len(angles), self.points_per_prior], ) left_valid_point_indices = tf.gather( point_indices, tf.squeeze(left_valid_priors) ) right_valid_point_indices = tf.gather( point_indices, tf.squeeze(right_valid_priors) ) left_valid_prior_points = tf.reshape( tf.gather(left_clipped_priors, left_valid_point_indices), [-1, 2] ) right_valid_prior_points = tf.reshape( tf.gather(right_clipped_priors, right_valid_point_indices), [-1, 2] ) priors = tf.reshape( tf.transpose( a=tf.stack( [left_valid_prior_points, right_valid_prior_points], axis=-1 ), perm=[0, 2, 1], ), [-1, 2], ) return priors def _get_nall_priors(self, feature_map_sizes): """Get the number of priors.""" if len(feature_map_sizes) == 0: raise ValueError("Feature map sizes not yet set.") return np.sum([np.prod(fmaps) * self.npriors for fmaps in feature_map_sizes]) def _get_priors(self, feature_map_sizes, image_height, image_width): """ Generate path priors based on the image size and constraints. Returns: priors (array of floats): Ordered as leftx, lefty, rightx, righty coordinates. """ priors = None # For each feature map, calculate the priors in image coordinates. for feature_map_size in feature_map_sizes: # Calculate the receptive field sizes in the original image for this layer. receptive_field_x = np.floor(image_width / feature_map_size[1]) receptive_field_y = np.floor(image_height / feature_map_size[0]) # Specify the scale ratios for the priors. Based on analysis of ground truth # data average widths and heights of paths in the image. scale_x = self.normalized_linear_prior_length * receptive_field_x scale_y = self.normalized_linear_prior_length * receptive_field_y # Enumerate the receptive field center locations. prior_locations = self._get_prior_locations( receptive_field_x, receptive_field_y, image_height=image_height, image_width=image_width, ) # Calculate the parameters for the linear priors. The rotation angles should all # be positive so that the default linear prior vector is rotated clockwise. if self.nlinear_priors > 0: angle_increment = np.pi / (self.nlinear_priors + 1.0) linear_prior_rotation_angles = np.linspace( angle_increment, np.pi - angle_increment, self.nlinear_priors ) # Calculate the priors for each location. 
# TODO(blythe): Determine if we can generate all base priors, copy, translate and # then clip, rather than doing this loop. Complicated because the clipper doesn't # support the point priors. for tx, ty in zip( prior_locations[0].flatten(), prior_locations[1].flatten() ): if self.npoint_priors > 0: point_priors = self._generate_point_priors( tx, ty, image_height=image_height, image_width=image_width ) for _ in range(self.npoint_priors): priors = ( tf.concat([priors, point_priors], 0) if priors is not None else point_priors ) if self.nlinear_priors > 0: linear_priors = self._generate_linear_priors( linear_prior_rotation_angles, scale_x, scale_y, tx, ty, image_height=image_height, image_width=image_width, ) priors = ( tf.concat([priors, linear_priors], 0) if priors is not None else linear_priors ) return priors
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/priors_generator.py
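For orientation, the sketch below restates the two coordinate operations the generator above is built on: placing prior sets at receptive-field centers (mirroring `_get_prior_locations`) and rotating/scaling/translating a base prior (mirroring `_transform_prior`). It is a minimal standalone NumPy illustration of the same math, not the TensorFlow implementation; the concrete numbers are made up for the example.

import numpy as np

def prior_locations(receptive_field_x, receptive_field_y, image_height, image_width):
    # Centers of the receptive fields, spaced one receptive field apart.
    xs = np.arange(receptive_field_x / 2.0,
                   image_width + 1 - receptive_field_x / 2.0,
                   receptive_field_x)
    ys = np.arange(receptive_field_y / 2.0,
                   image_height + 1 - receptive_field_y / 2.0,
                   receptive_field_y)
    return np.meshgrid(xs, ys)

def transform_prior(origin, xs, ys, angle, scale_x=1.0, scale_y=1.0, tx=0.0, ty=0.0):
    # 2D rotation about `origin`, then scaling and translation, as in the class.
    ox, oy = origin
    newx = ox + np.cos(angle) * (xs - ox) - np.sin(angle) * (ys - oy)
    newy = oy + np.sin(angle) * (xs - ox) + np.cos(angle) * (ys - oy)
    return scale_x * newx + tx, scale_y * newy + ty

# A 3x5 feature map on a 504x960 image gives receptive fields of 168x192 pixels.
grid_x, grid_y = prior_locations(192.0, 168.0, image_height=504, image_width=960)

# Base linear prior: a horizontal, left-pointing segment through the origin.
base_xs = np.linspace(1.0, -1.0, 3)
base_ys = np.zeros(3)
# Rotate it 90 degrees and move it to the first receptive-field center.
print(transform_prior((0, 0), base_xs, base_ys, 0.5 * np.pi,
                      tx=grid_x[0][0], ty=grid_y[0][0]))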
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for TransformProcessor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from mock import Mock from nvidia_tao_tf1.blocks.multi_source_loader.data_format import ( CHANNELS_FIRST, CHANNELS_LAST, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import ( TransformProcessor, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( FEATURE_CAMERA, LABEL_MAP, ) class TestTransformProcessor(ProcessorTestCase): def test_construction_fails_when_transformer_is_none(self): with self.assertRaises(ValueError): TransformProcessor(None) def test_supports_channels_last(self): processor = TransformProcessor(Mock()) assert processor.supported_formats == [CHANNELS_FIRST, CHANNELS_LAST] def test_process_fails_when_input_is_not_an_example(self): with self.assertRaises(TypeError): TransformProcessor(Mock()).process(Mock()) def test_uses_provided_transform(self): transform = Mock() processor = TransformProcessor(transform) # The identity transform produces outputs identical to inputs. processor._transform = Mock(side_effect=lambda transform: transform) with self.test_session() as session: example = self.make_example_128x240() processed = processor.process(example) processor._transform.assert_called_once() self.assertAllEqual( session.run(processed.instances[FEATURE_CAMERA]), session.run(example.instances[FEATURE_CAMERA]), ) self.assertAllClose( session.run(processed.labels[LABEL_MAP]), session.run(example.labels[LABEL_MAP]), )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/transform_processor_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for scaling and translating labels.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import ( Processor, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Example, LABEL_MAP, Polygon2DLabel, PolygonLabel, SequenceExample, ) from nvidia_tao_tf1.core.coreobject import save_args class LabelAdjustment(Processor): """LabelAdjustment processor.""" @save_args def __init__(self, scale=1.0, translation_x=0, translation_y=0): """Create a processor for scaling and translating the labels. Used as a workaround for datasets where image and label coordinate systems do not match. Args: scale (float): Label scaling factor. translation_x (int): Label translation in x direction. translation_y (int): Label translation in y direction. """ super(LabelAdjustment, self).__init__() if scale <= 0: raise ValueError("Scale: {} is not positive.".format(scale)) if translation_x < 0: raise ValueError( "Translation x: {} cannot be a negative number.".format(translation_x) ) if translation_y < 0: raise ValueError( "Translation y: {} cannot be a negative number.".format(translation_y) ) self._scale = scale self._translation_x = translation_x self._translation_y = translation_y @property def supported_formats(self): """Data format of the image/frame tensors to process.""" return [CHANNELS_FIRST] def can_compose(self, other): """ Determine whether two processors can be composed into a single one. Args: other (Processor): Other processor instance. Returns: (Boolean): Always False - composition not supported. """ return False def compose(self, other): """Compose two processors into a single one. Args: other (Processor): Other processor instance. Raises: NotImplementedError: Composition not supported. """ raise NotImplementedError("Composition not supported.") def process(self, example): """ Apply adjustments to the polygon/polyline labels. Labels do not always have the correct size information (half vs full) and are not necessarily aligned with the images (center cropped datasets). The label adjustment parameters contain translation and scaling information to align the labels with the images. Args: example (Example): Example with the labels to apply the adjustments on. Returns: (Example): Example with the adjusted labels. """ # TODO(vkallioniemi): remove these adjustments and the related info from the specs once # all tfrecords have been regenerated with accurate information. # Sometimes the records do not contain correct size information for labels, so we have to # scale and translate them.
if isinstance(example, SequenceExample): labels = example.labels if LABEL_MAP in labels: # Polygon2DLabel polygon_2d_label = labels[LABEL_MAP] # Coordinates2DWithCounts coordinates_2d = polygon_2d_label.vertices sparse_coordinates = coordinates_2d.coordinates def _transform_coordinates(coordinates): vertices = tf.reshape(coordinates.values, [-1, 2]) scaled = tf.matmul( vertices, tf.linalg.tensor_diag([self._scale, self._scale]) ) translated = tf.subtract( scaled, tf.constant( [self._translation_x, self._translation_y], dtype=tf.float32 ), ) translated = tf.reshape( translated, tf.shape(input=coordinates.values) ) return tf.SparseTensor( indices=coordinates.indices, values=translated, dense_shape=coordinates.dense_shape, ) # In the case where coordinates are empty, do nothing. Transformations on empty Sparse # tensors were causing problems. transformed_coordinates = tf.cond( pred=tf.greater(tf.size(input=sparse_coordinates.values), 0), true_fn=lambda: _transform_coordinates(sparse_coordinates), false_fn=lambda: sparse_coordinates, ) transformed_coordinates_2d = coordinates_2d.replace_coordinates( transformed_coordinates ) translated_polygon_2d_label = Polygon2DLabel( vertices=transformed_coordinates_2d, classes=polygon_2d_label.classes, attributes=polygon_2d_label.attributes, ) labels[LABEL_MAP] = translated_polygon_2d_label return SequenceExample(instances=example.instances, labels=labels) # The legacy LaneNet dataloader expects transformations to be applied # here. TODO(vkallioniemi or ehall): remove this functionality once we delete # the old dataloader. labels = example.labels[LABEL_MAP] polygon_2d_label = example.labels[LABEL_MAP].polygons scaled_polygons = tf.matmul( polygon_2d_label, tf.linalg.tensor_diag([self._scale, self._scale]) ) translated_polygon_2d_label = tf.subtract( scaled_polygons, tf.constant([self._translation_x, self._translation_y], dtype=tf.float32), ) adjusted_polygon = PolygonLabel( polygons=translated_polygon_2d_label, vertices_per_polygon=labels.vertices_per_polygon, class_ids_per_polygon=labels.class_ids_per_polygon, attributes_per_polygon=labels.attributes_per_polygon, polygons_per_image=labels.polygons_per_image, attributes=labels.attributes, attribute_count_per_polygon=labels.attribute_count_per_polygon, ) return Example( instances=example.instances, labels={LABEL_MAP: adjusted_polygon} )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/label_adjustment.py
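The core of the adjustment above is a scale followed by a translation of every (x, y) vertex. The fragment below is a minimal sketch of that arithmetic using the same TensorFlow ops; the scale and translation constants are made up for illustration.

import tensorflow as tf

scale, translation_x, translation_y = 0.5, 10.0, 20.0
vertices = tf.constant([[100.0, 200.0], [300.0, 400.0]])  # [N, 2] rows of (x, y).

# Scale via a diagonal matrix, then subtract the translation, as in process().
scaled = tf.matmul(vertices, tf.linalg.tensor_diag([scale, scale]))
adjusted = tf.subtract(scaled, tf.constant([translation_x, translation_y]))
# adjusted evaluates to [[40., 80.], [140., 180.]].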
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomHueSaturation processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from mock import patch from parameterized import parameterized from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_hue_saturation import ( RandomHueSaturation, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object from modulus.processors.augment.color import random_hue_saturation_matrix def _esc(message): """Escape passed in string for regular expressions.""" return re.escape(message) class TestRandomHueSaturation(ProcessorTestCase): @parameterized.expand( [ [ -1.0, 0.0, _esc( "RandomHueSaturation.hue_rotation_max (-1.0) is not within the range \ [0.0, 360.0]." ), ], [ 361.0, 0.0, _esc( "RandomHueSaturation.hue_rotation_max (361.0) is not within the range \ [0.0, 360.0]." ), ], [ 150.0, -1.0, _esc( "RandomHueSaturation.saturation_shift_max (-1.0) is not within the range \ [0.0, 1.0]." ), ], ] ) def test_raises_on_invalid_arguments( self, hue_rotation_max, saturation_shift_max, message ): with self.assertRaisesRegexp(ValueError, message): RandomHueSaturation( hue_rotation_max=hue_rotation_max, saturation_shift_max=saturation_shift_max, ) @patch( "modulus.processors.augment.color.random_hue_saturation_matrix", side_effect=random_hue_saturation_matrix, ) def test_delegates_to_random_hue_saturation_matrix( self, spied_random_hue_saturation_matrix ): example = self.make_example_128x240() augmentation = RandomHueSaturation( hue_rotation_max=180.0, saturation_shift_max=0.0 ) augmentation.process(example) spied_random_hue_saturation_matrix.assert_called_with( hue_rotation_max=180.0, saturation_shift_max=0.0, batch_size=None ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomHueSaturation( hue_rotation_max=180.0, saturation_shift_max=0.0 ) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_hue_saturation_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unitests for the path generator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from parameterized import parameterized import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.frame_shape import FrameShape from nvidia_tao_tf1.blocks.multi_source_loader.processors.path_generator import ( PathGenerator, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.priors_generator import ( PriorsGenerator, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Coordinates2DWithCounts, Example, FEATURE_CAMERA, LABEL_MAP, Polygon2DLabel, PolygonLabel, ) import nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures as fixtures from nvidia_tao_tf1.core.coreobject import deserialize_tao_object class TestPathGenerator(ProcessorTestCase): def _create_path_generator(self, npath_attributes=0, edges_per_path=2): class_name_to_id = {"drivepath 0": 0, "drivepath 1": 1, "drivepath -1": 2} equidistant_interpolation = False return PathGenerator( nclasses=3, class_name_to_id=class_name_to_id, equidistant_interpolation=equidistant_interpolation, npath_attributes=npath_attributes, edges_per_path=edges_per_path, ) def _make_multi_polygon_label( self, vertices, vertices_per_polyline, class_ids_per_polyline, attributes_per_polyline, ): """Create a PolygonLabel. Args: vertices (list of `float`): Vertices that make up the polygon. class_id (int): Identifier for the class that the label represents. 
""" polygons = tf.constant(vertices, dtype=tf.float32) vertices_per_polygon = tf.constant(vertices_per_polyline, dtype=tf.int32) class_ids_per_polygon = tf.constant(class_ids_per_polyline, dtype=tf.int32) attributes_per_polygon = tf.constant(attributes_per_polyline, dtype=tf.int32) polygons_per_image = tf.constant([1], dtype=tf.int32) attributes = (tf.constant([], tf.int32),) attribute_count_per_polygon = tf.constant([], tf.int32) return PolygonLabel( polygons=polygons, vertices_per_polygon=vertices_per_polygon, class_ids_per_polygon=class_ids_per_polygon, attributes_per_polygon=attributes_per_polygon, polygons_per_image=polygons_per_image, attributes=attributes, attribute_count_per_polygon=attribute_count_per_polygon, ) def _make_coordinates2d(self, polylines, vertices_per_polyline): frame_shape_count = len(vertices_per_polyline) indices = [] for shape_index in range(frame_shape_count): for vertex_index in range(vertices_per_polyline[shape_index]): for coordinate_index in [0, 1]: indices.append([0, 0, shape_index, vertex_index, coordinate_index]) dense_coordinates = tf.constant(polylines, dtype=tf.float32) sparse_indices = tf.constant(indices, dtype=tf.int64) dense_shape = tf.constant( (1, 1, frame_shape_count, max(vertices_per_polyline), 2), dtype=tf.int64 ) sparse_coordinates = tf.SparseTensor( indices=sparse_indices, values=tf.reshape(dense_coordinates, (-1,)), dense_shape=dense_shape, ) vertices_count = tf.SparseTensor( indices=tf.constant( [[0, 0, j] for j in range(frame_shape_count)], dtype=tf.int64 ), values=tf.constant(vertices_per_polyline), dense_shape=tf.constant([1, 1, frame_shape_count], dtype=tf.int64), ) return Coordinates2DWithCounts( coordinates=sparse_coordinates, canvas_shape=fixtures.make_canvas2d(1, 100, 100), vertices_count=vertices_count, ) @parameterized.expand( [ [np.array([-1, 1, -1, 1]), 0, np.array([-1, 1, -1, 1])], [np.array([0, 0, 0]), 0, np.array([0, 0, 0])], [np.array([0, 0, 0, 0]), 1, np.array([0, 0, 0, 0])], [ np.array([-1, 3, 1, 3, 2, -1, 2, 1, 0, 0]), 1, np.array([-1, 1, -1, 1, 0, 3, 3, 2, 2, 0]), ], [ np.array([0, 0, -1, 2, 0, 0, 1, 2, 3, -1, 3, 1]), 1, np.array([0, -1, 0, 1, -1, 1, 0, 2, 0, 2, 3, 3]), ], ] ) def test_reorder_attributes( self, attributes, npath_attributes, expected_attributes ): path_generator = self._create_path_generator() attributes = tf.constant(attributes) reordered_attributes = path_generator._reorder_attributes( attributes, npath_attributes ) with self.test_session(): reordered_attributes_np = reordered_attributes.eval() self.assertAllEqual(reordered_attributes_np, expected_attributes) def test_constructor_sets_output_classes(self): path_generator = self._create_path_generator() assert path_generator.class_name_to_id == { "drivepath 0": 0, "drivepath 1": 1, "drivepath -1": 2, } _POLYLINES = np.array( [ [30, 60], [30, 80], [90, 60], [90, 80], [20, 30], [20, 50], [20, 60], [20, 80], [40, 30], [40, 50], [40, 60], [40, 80], [40, 100], [60, 100], [80, 100], [40, 60], ], dtype=np.float32, ) @parameterized.expand( [ [ _POLYLINES[:4], np.array([2, 2]), np.array([0, 0]), np.array([-1, 1]), np.array([4, 16]), np.array([3]), ], [ _POLYLINES[:12], np.array([2, 2, 4, 4]), np.array([0, 0, 1, 1]), np.array([-1, 1, -1, 1]), np.array([4, 16]), np.array([2, 3]), ], [ _POLYLINES[:16], np.array([2, 2, 4, 3, 2, 3]), np.array([0, 0, 1, 1, 2, 2]), np.array([-1, 1, -1, 1, -1, 1]), np.array([4, 16]), np.array([0, 2]), ], ] ) def test_process_with_varying_polylines( self, polylines, vertices_per_polyline, class_ids_per_polyline, 
attributes_ids_per_polyline, expected_shapes, expected_valid_paths, ): height = 100 width = 100 frames = tf.ones((1, 3, height, width)) polygon = self._make_multi_polygon_label( polylines, vertices_per_polyline, class_ids_per_polyline, attributes_ids_per_polyline, ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) priors_generator = PriorsGenerator( npoint_priors=1, nlinear_priors=0, points_per_prior=3, prior_threshold=0.2, feature_map_sizes=[(2, 2)], image_height=height, image_width=width, ) path_generator = self._create_path_generator() with self.test_session(): path_generated = path_generator.encode_dense(example, priors_generator) targets = path_generated.labels[LABEL_MAP].eval() # Make sure frames are untouched. self.assertAllEqual( path_generated.instances[FEATURE_CAMERA].eval(), frames.eval() ) self.assertAllEqual(targets.shape, expected_shapes) # `targets` has as many rows as number of priors. # If one of the priors is assigned to a label, # some of the values of that row are > 0. # If the prior is not assigned to the label, all the values from that row are <= 0. priors_assigned_to_labels = np.unique(np.where(targets > 0)[0]) self.assertAllEqual(priors_assigned_to_labels, expected_valid_paths) @parameterized.expand( [ [ _POLYLINES[:4], np.array([2, 2]), np.array([0, 0]), np.array([-1, 2, 1, 2]), np.array([4, 17]), np.array([3]), ], [ _POLYLINES[:12], np.array([2, 2, 4, 4]), np.array([0, 0, 1, 1]), np.array([3, -1, 1, 3, -1, 2, 2, 1]), np.array([4, 17]), np.array([2, 3]), ], [ _POLYLINES[:16], np.array([2, 2, 4, 3, 2, 3]), np.array([0, 0, 1, 1, 2, 2]), np.array([-1, 2, 1, 2, 3, -1, 1, 3, -1, 3, 1, 3]), np.array([4, 17]), np.array([0, 2]), ], ] ) def test_process_with_varying_polylines_and_path_attributes( self, polylines, vertices_per_polyline, class_ids_per_polyline, attributes_ids_per_polyline, expected_shapes, expected_valid_paths, ): height = 100 width = 100 frames = tf.ones((1, 3, height, width)) polygon = self._make_multi_polygon_label( polylines, vertices_per_polyline, class_ids_per_polyline, attributes_ids_per_polyline, ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) priors_generator = PriorsGenerator( npoint_priors=1, nlinear_priors=0, points_per_prior=3, prior_threshold=0.2, feature_map_sizes=[(2, 2)], image_height=height, image_width=width, ) path_generator = self._create_path_generator(npath_attributes=1) with self.test_session(): path_generated = path_generator.encode_dense(example, priors_generator) targets = path_generated.labels[LABEL_MAP].eval() # Make sure frames are untouched. self.assertAllEqual( path_generated.instances[FEATURE_CAMERA].eval(), frames.eval() ) self.assertAllEqual(targets.shape, expected_shapes) # `targets` has as many rows as number of priors. # If one of the priors is assigned to a label, # some of the values of that row are > 0. # If the prior is not assigned to the label, all the values from that row are <= 0. 
priors_assigned_to_labels = np.unique(np.where(targets > 0)[0]) self.assertAllEqual(priors_assigned_to_labels, expected_valid_paths) @parameterized.expand( [ [1, 0, 3, np.array([4, 16]), np.array([3])], [1, 0, 2, np.array([4, 12]), np.array([])], [1, 1, 3, np.array([8, 16]), np.array([3])], ] ) def test_process_with_varying_priors( self, npoint_priors, nlinear_priors, points_per_prior, expected_shapes, expected_valid_paths, ): height = 100 width = 100 frames = tf.ones((1, 3, height, width)) polylines = np.array([[30, 60], [30, 80], [90, 60], [90, 80]]) vertices_per_polyline = np.array([2, 2]) class_ids_per_polyline = np.array([0, 0]) attributes_ids_per_polyline = np.array([-1, 1]) polygon = self._make_multi_polygon_label( polylines, vertices_per_polyline, class_ids_per_polyline, attributes_ids_per_polyline, ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: polygon} ) priors_generator = PriorsGenerator( npoint_priors=npoint_priors, nlinear_priors=nlinear_priors, points_per_prior=points_per_prior, prior_threshold=0.2, feature_map_sizes=[(2, 2)], image_height=height, image_width=width, ) path_generator = self._create_path_generator() with self.test_session(): path_generated = path_generator.encode_dense(example, priors_generator) targets = path_generated.labels[LABEL_MAP].eval() self.assertAllEqual( path_generated.instances[FEATURE_CAMERA].eval(), frames.eval() ) self.assertAllEqual(targets.shape, expected_shapes) # Get the rows of the priors assigned to the labels. priors_assigned_to_labels = np.unique(np.where(targets > 0)[0]) self.assertAllEqual(priors_assigned_to_labels, expected_valid_paths) @parameterized.expand( [ [[[1]], [1], [0]], [[[1, 1]], [1], [0]], [[[1], [2]], [1], [0]], [[[1, 2], [2, 3]], [1], [0]], [[[1, 2, 3], [4, 5, 6]], [1], [0]], ] ) def test_encode_with_triangle_shapes( self, shapes_per_frame, shape_classes, shape_attributes ): nclasses = 3 height = 100 width = 100 example_count = len(shapes_per_frame) max_frame_count = max( [len(frames_per_example) for frames_per_example in shapes_per_frame] ) priors_generator = PriorsGenerator( npoint_priors=1, nlinear_priors=8, points_per_prior=2, prior_threshold=0.1, feature_map_sizes=[(2, 2)], image_height=height, image_width=width, ) expected_shape = ( example_count, max_frame_count, priors_generator.nall_priors, priors_generator.points_per_prior * 4 + nclasses + 1, ) path_generator = self._create_path_generator() polygon = fixtures.make_polygon2d_label( shapes_per_frame=shapes_per_frame, shape_classes=shape_classes, shape_attributes=shape_attributes, height=height, width=width, ) with self.test_session(): path_generated = path_generator.encode_sparse( labels2d=polygon, priors_generator=priors_generator, image_shape=FrameShape(height=504, width=960, channels=3), ) self.assertAllEqual(expected_shape, path_generated.shape) _POLYLINES = np.array( [ [30, 60], [30, 80], [90, 60], [90, 80], [20, 30], [20, 50], [20, 60], [20, 80], [40, 30], [40, 50], [40, 60], [40, 80], [40, 100], [60, 100], [80, 100], [40, 60], ], dtype=np.float32, ) @parameterized.expand( [ [ _POLYLINES[:4], [2, 2], [[[[0], [0]]]], [[[[-1], [1]]]], 2, np.array([1, 1, 4, 16]), np.array([3]), ], [ _POLYLINES[:12], [2, 2, 4, 4], [[[[0], [0], [1], [1]]]], [[[[-1], [1], [-1], [1]]]], 2, np.array([1, 1, 4, 16]), np.array([2, 3]), ], [ _POLYLINES[:16], [2, 2, 4, 3, 2, 3], [[[[0], [0], [1], [1], [2], [2]]]], [[[[-1], [1], [-1], [1], [-1], [1]]]], 2, np.array([1, 1, 4, 16]), np.array([0, 2]), ], [ np.concatenate((_POLYLINES[:16], _POLYLINES[:9]), 
axis=0), [2, 2, 4, 3, 2, 3, 3, 3, 3], [[[[0], [0], [1], [1], [2], [2], [0], [1], [2]]]], [[[[-1], [1], [-1], [1], [-1], [1], [2], [2], [2]]]], 3, np.array([1, 1, 4, 22]), np.array([0, 2]), ], ] ) def test_encode_with_varying_polylines_and_edges_per_path( self, polylines, vertices_per_polyline, class_ids_per_polyline, attributes_ids_per_polyline, edges_per_path, expected_shapes, expected_valid_paths, ): polygon = Polygon2DLabel( vertices=self._make_coordinates2d(polylines, vertices_per_polyline), classes=fixtures.make_tags(class_ids_per_polyline), attributes=fixtures.make_tags(attributes_ids_per_polyline), ) priors_generator = PriorsGenerator( npoint_priors=1, nlinear_priors=0, points_per_prior=3, prior_threshold=0.2, feature_map_sizes=[(2, 2)], image_height=100, image_width=100, ) path_generator = self._create_path_generator(edges_per_path=edges_per_path) with self.test_session(): path_generated = path_generator.encode_sparse( labels2d=polygon, priors_generator=priors_generator, image_shape=FrameShape(height=504, width=960, channels=3), ) targets = path_generated.eval() self.assertAllEqual(targets.shape, expected_shapes) priors_assigned_to_labels = np.unique(np.where(targets > 0)[2]) self.assertAllEqual(priors_assigned_to_labels, expected_valid_paths) @parameterized.expand( [ [1, 0, 3, np.array([1, 1, 4, 16]), np.array([3])], [1, 0, 2, np.array([1, 1, 4, 12]), np.array([])], [1, 1, 3, np.array([1, 1, 8, 16]), np.array([3])], ] ) def test_encode_with_varying_priors( self, npoint_priors, nlinear_priors, points_per_prior, expected_shapes, expected_valid_paths, ): polygon = Polygon2DLabel( vertices=self._make_coordinates2d( [[30, 60], [30, 80], [90, 60], [90, 80]], [2, 2] ), classes=fixtures.make_tags([[[[0], [0]]]]), attributes=fixtures.make_tags([[[[-1], [1]]]]), ) priors_generator = PriorsGenerator( npoint_priors=npoint_priors, nlinear_priors=nlinear_priors, points_per_prior=points_per_prior, prior_threshold=0.2, feature_map_sizes=[(2, 2)], image_height=100, image_width=100, ) path_generator = self._create_path_generator() with self.test_session(): path_generated = path_generator.encode_sparse( labels2d=polygon, priors_generator=priors_generator, image_shape=FrameShape(height=504, width=960, channels=3), ) targets = path_generated.eval() self.assertAllEqual(targets.shape, expected_shapes) priors_assigned_to_labels = np.unique(np.where(targets > 0)[2]) self.assertAllEqual(priors_assigned_to_labels, expected_valid_paths) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" path_generator = self._create_path_generator() path_generator_dict = path_generator.serialize() deserialized_path_generator = deserialize_tao_object(path_generator_dict) self.assertEqual(path_generator.nclasses, deserialized_path_generator.nclasses) self.assertEqual( path_generator.class_name_to_id, deserialized_path_generator.class_name_to_id, ) self.assertEqual( path_generator._equidistant_interpolation, deserialized_path_generator._equidistant_interpolation, ) self.assertEqual( path_generator._path_priors, deserialized_path_generator._path_priors ) self.assertEqual( path_generator._prior_assignment_constraint, deserialized_path_generator._prior_assignment_constraint, ) self.assertEqual( path_generator._using_invalid_path_class, deserialized_path_generator._using_invalid_path_class, ) self.assertEqual( path_generator.npath_attributes, deserialized_path_generator.npath_attributes, ) self.assertEqual( path_generator._edges_per_path, 
deserialized_path_generator._edges_per_path )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/path_generator_test.py
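The `_make_coordinates2d` helper in the test above packs polylines into a five-dimensional sparse layout whose indices are [example, frame, shape, vertex, coordinate]. The sketch below spells out that convention for a two-polyline case; it is illustrative only, not a drop-in test fixture.

import tensorflow as tf

# Two polylines with two (x, y) vertices each.
polylines = [[30.0, 60.0], [30.0, 80.0], [90.0, 60.0], [90.0, 80.0]]
indices = [
    [0, 0, 0, 0, 0], [0, 0, 0, 0, 1],  # shape 0, vertex 0, x then y
    [0, 0, 0, 1, 0], [0, 0, 0, 1, 1],  # shape 0, vertex 1
    [0, 0, 1, 0, 0], [0, 0, 1, 0, 1],  # shape 1, vertex 0
    [0, 0, 1, 1, 0], [0, 0, 1, 1, 1],  # shape 1, vertex 1
]
coordinates = tf.SparseTensor(
    indices=tf.constant(indices, tf.int64),
    values=tf.reshape(tf.constant(polylines), (-1,)),
    # 1 example, 1 frame, 2 shapes, up to 2 vertices, 2 coordinates.
    dense_shape=tf.constant([1, 1, 2, 2, 2], tf.int64),
)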
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for applying random translation augmentations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import ( TransformProcessor, ) from nvidia_tao_tf1.core.coreobject import save_args from nvidia_tao_tf1.core.processors import RandomTranslation as _RandomTranslation class RandomTranslation(TransformProcessor): """Augmentation processor that randomly translates images and labels.""" @save_args def __init__(self, max_x, max_y, probability=1.0): """Construct a RandomTranslation processor. Args: max_x (int): If translation occurs, this is the lower and higher bound of the uniform distribution from which an integer will be picked to translate horizontally. max_y (int): If translation occurs, this is the lower and higher bound of the uniform distribution from which an integer will be picked to translate vertically. probability (float): Probability at which translation occurs. """ super(RandomTranslation, self).__init__( _RandomTranslation(max_x, max_y, probability) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_translation.py
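A short usage sketch for the processor above, assuming the TAO TF1 environment is importable; the bounds and probability are illustrative values, not recommendations.

from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_translation import (
    RandomTranslation,
)

# Translate by an integer drawn uniformly from [-8, 8] horizontally and
# [-4, 4] vertically, on roughly half of the examples.
augmentation = RandomTranslation(max_x=8, max_y=4, probability=0.5)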
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processors benchmark suite.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.crop import Crop from nvidia_tao_tf1.blocks.multi_source_loader.processors.pipeline import Pipeline from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_contrast import ( RandomContrast, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.scale import Scale from nvidia_tao_tf1.blocks.multi_source_loader.types import ( empty_polygon_label, Example, ) class ProcessorBenchmark(tf.test.Benchmark): """Processors benchmark suite.""" ITERATIONS = 1000 def create_example(self, image_width, image_height): """Create an example. Args: image_width (int): Width of the image to generate. image_height (int): Height of the image to generate. Returns: (Example): An example containing a 4D frames tensor. """ frames = tf.constant(1.0, shape=[3, image_height, image_width]) return Example( frames=tf.expand_dims(frames, axis=0), labels=empty_polygon_label(), ids=tf.constant(42), ) def _benchmark_single_processor(self, sess): """Benchmark single processor. Args: sess (tf.Session): Session to run the benchmark in. """ example = self.create_example(960, 504) crop = Crop(left=181, top=315, right=779, bottom=440) pipeline = Pipeline([crop]) run_tensor = pipeline(example) self.run_op_benchmark( sess=sess, op_or_tensor=run_tensor, min_iters=self.ITERATIONS, store_trace=True, store_memory_usage=True, ) def _benchmark_multi_processors(self, sess): """Benchmark multiple processors. Args: sess (tf.Session): Session to run the benchmark in. """ example = self.create_example(960, 504) crop = Crop(left=181, top=315, right=779, bottom=440) scale = Scale(height=200, width=960) random_contrast = RandomContrast(scale_max=0.5, center=0.5) pipeline = Pipeline([crop, scale, random_contrast]) run_tensor = pipeline(example) self.run_op_benchmark( sess=sess, op_or_tensor=run_tensor, min_iters=self.ITERATIONS, store_trace=True, store_memory_usage=True, ) def benchmark_single_processor_with_gpu(self): """Benchmark a single processor with gpu.""" with tf.device("/gpu:0"), tf.compat.v1.Session() as sess: self._benchmark_single_processor(sess) def benchmark_single_processor_with_cpu(self): """Benchmark a single processor with cpu.""" with tf.device("/cpu:0"), tf.compat.v1.Session() as sess: self._benchmark_single_processor(sess) def benchmark_multi_processors_with_gpu(self): """Benchmark multiple processors with gpu.""" with tf.device("/gpu:0"), tf.compat.v1.Session() as sess: self._benchmark_multi_processors(sess) def benchmark_multi_processors_with_cpu(self): """Benchmark multiple processors with cpu.""" with tf.device("/cpu:0"), tf.compat.v1.Session() as sess: self._benchmark_multi_processors(sess) if __name__ == "__main__": tf.test.main()
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/processor_benchmark.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for converting polylines into polygons.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import ( Coordinates2D, ) from nvidia_tao_tf1.blocks.multi_source_loader.types.polygon2d_label import ( Polygon2DLabel, ) from nvidia_tao_tf1.core.coreobject import TAOObject, save_args from nvidia_tao_tf1.core.processors.processors import is_sparse, load_custom_tf_op logger = logging.getLogger(__name__) class PolylineToPolygon(TAOObject): """Processor that converts polylines to polygons to be later rasterized.""" @save_args def __init__(self, class_id, line_width=1.0, debug=False): """ Construct a PolylineToPolygon processor. Args: class_id (int): The class id whose polylines are to be converted. line_width (float): The width of the resulting polygons. debug (bool): Whether to enable debug mode in the custom op. Raises: ValueError: When invalid arguments are provided. """ super(PolylineToPolygon, self).__init__() if class_id < 0: raise ValueError( "class_id: {} must be a valid >= 0 number.".format(class_id) ) if line_width <= 0.0: raise ValueError("line_width must be > 0. {} provided.".format(line_width)) self.class_id = class_id self.line_width = float(line_width) self.debug = 1 if debug else 0 def process(self, labels2d): """ Convert a polyline to a set of polygons, to be consumed by the polygon rasterizer. Args: labels2d (Polygon2DLabel): A label containing 2D polygons/polylines and their associated classes and attributes. The first two dimensions of each tensor that this structure contains should be batch/example followed by a frame/time dimension. The rest of the dimensions encode type specific information. See Polygon2DLabel documentation for details. Returns: (Polygon2DLabel): A label with the same format as before, with the components matching the target class treated as polylines and converted to polygons.
""" logger.info( "Building polyline to polygon conversion for class {} with width {}.".format( self.class_id, self.line_width ) ) new_coordinates, new_class_ids = self._process( polygons=labels2d.vertices.coordinates, class_ids_per_polygon=labels2d.classes, ) return Polygon2DLabel( vertices=Coordinates2D( coordinates=new_coordinates, canvas_shape=labels2d.vertices.canvas_shape ), classes=new_class_ids, attributes=labels2d.attributes, ) def _process(self, polygons, class_ids_per_polygon): assert is_sparse(polygons) assert is_sparse(class_ids_per_polygon) op = load_custom_tf_op("op_polyline_to_polygon.so", __file__) ( op_indices, op_values, op_dense_shape, op_class_ids_indices, op_class_ids_values, op_class_ids_shape, ) = op.polyline_to_polygon( polygon_indices=polygons.indices, polygon_values=polygons.values, polygon_dense_shape=polygons.dense_shape, class_ids_indices=class_ids_per_polygon.indices, class_ids_values=class_ids_per_polygon.values, class_ids_shape=class_ids_per_polygon.dense_shape, target_class_id=self.class_id, line_width=self.line_width, debug=self.debug, ) polygons = tf.SparseTensor( indices=op_indices, values=op_values, dense_shape=op_dense_shape ) class_ids = tf.SparseTensor( indices=op_class_ids_indices, values=op_class_ids_values, dense_shape=op_class_ids_shape, ) return polygons, class_ids
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/polyline_to_polygon.py
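The conversion itself runs inside the custom op loaded above, so the NumPy sketch below only illustrates the geometric idea of giving a polyline segment a width: offset the segment by plus/minus line_width / 2 along its unit normal to obtain a four-vertex polygon. It is a sketch of the concept, not the op's exact output or vertex ordering.

import numpy as np

def segment_to_polygon(p0, p1, line_width):
    # Widen the segment p0 -> p1 into a quad of half-width line_width / 2.
    p0, p1 = np.asarray(p0, float), np.asarray(p1, float)
    direction = p1 - p0
    direction /= np.linalg.norm(direction)
    normal = np.array([-direction[1], direction[0]])  # Perpendicular unit vector.
    offset = 0.5 * line_width * normal
    return np.stack([p0 + offset, p1 + offset, p1 - offset, p0 - offset])

print(segment_to_polygon((0.0, 0.0), (10.0, 0.0), 2.0))
# [[ 0.  1.] [10.  1.] [10. -1.] [ 0. -1.]]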
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for applying random hue and saturation augmentations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import ( TransformProcessor, ) from nvidia_tao_tf1.core.coreobject import save_args from nvidia_tao_tf1.core.processors import RandomHueSaturation as _RandomHueSaturation class RandomHueSaturation(TransformProcessor): """Augmentation processor that randomly perturbs the hue and saturation of colors.""" @save_args def __init__(self, hue_rotation_max, saturation_shift_max): """Construct a RandomHueSaturation processor. Args: hue_rotation_max (float): The maximum rotation angle (0-360). This is used in a truncated normal distribution with a zero mean. This rotation angle is half of the standard deviation, because twice the standard deviation will be truncated. A value of 0 will not affect the matrix. saturation_shift_max (float): The random uniform shift, between 0 and 1, that changes the saturation. This value gives the negative and positive extent of the augmentation, where a value of 0 leaves the matrix unchanged. For example, a value of 1 can result in saturation values bounded between 0 (entirely desaturated) and 2 (twice the saturation). """ super(RandomHueSaturation, self).__init__( _RandomHueSaturation(hue_rotation_max, saturation_shift_max) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_hue_saturation.py
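A brief usage sketch for the processor above, matching the constructor exercised by its test file earlier in this section; the argument values are illustrative assumptions.

from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_hue_saturation import (
    RandomHueSaturation,
)

# Hue rotations drawn from a zero-mean truncated normal (half-stddev 25 degrees);
# saturation shifted uniformly within +/- 0.2.
augmentation = RandomHueSaturation(hue_rotation_max=25.0, saturation_shift_max=0.2)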
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Main test for PriorsGenerator object.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import mock import numpy as np import pytest import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.priors_generator import ( PriorsGenerator, ) _COORDINATES_PER_POINT = 4 _FEATURE_MAP_SIZES = [(3, 5)] _FLOATING_POINT_TOLERANCE = 0.01 _IMAGE_HEIGHT = 504 _IMAGE_WIDTH = 960 _NPOINT_PRIORS = 1 _NLINEAR_PRIORS = 8 _NPRIORS = _NPOINT_PRIORS + _NLINEAR_PRIORS _POINTS_PER_PRIOR = 2 _PRIOR_THRESHOLD = 0.1 @pytest.fixture(scope="session") def _priors_generator(): return PriorsGenerator( _NPOINT_PRIORS, _NLINEAR_PRIORS, _POINTS_PER_PRIOR, _PRIOR_THRESHOLD, _FEATURE_MAP_SIZES, _IMAGE_HEIGHT, _IMAGE_WIDTH, ) # Test all input parameters with positive and negative values. transform_tests = [ # Test rotation clockwise. ((0, 0), 1.0, 0.0, -np.pi / 2, 1.0, 1.0, 0.0, 0.0, (0.0, -1.0)), # Test rotation counter-clockwise. ((0, 0), 1.0, 0.0, np.pi / 2, 1.0, 1.0, 0.0, 0.0, (0.0, 1.0)), # Test rotation clockwise about other end. ((1.0, 0), 0.0, 0.0, -np.pi / 2, 1.0, 1.0, 0.0, 0.0, (1.0, 1.0)), # Test rotation counter-clockwise about other end. ((1.0, 0), 0.0, 0.0, np.pi / 2, 1.0, 1.0, 0.0, 0.0, (1.0, -1.0)), # Test scale up x. ((0, 0), 1.0, 1.0, 0.0, 10.0, 1.0, 0.0, 0.0, (10.0, 1.0)), # Test scale up y. ((0, 0), 1.0, 1.0, 0.0, 1.0, 10.0, 0.0, 0.0, (1.0, 10.0)), # Test scale down x. ((0, 0), 1.0, 1.0, 0.0, 0.1, 1.0, 0.0, 0.0, (0.1, 1.0)), # Test scale down y. ((0, 0), 1.0, 1.0, 0.0, 1.0, 0.1, 0.0, 0.0, (1.0, 0.1)), # Test translate x to the left. ((0, 0), 1.0, 1.0, 0.0, 1.0, 1.0, -10.0, 0.0, (-9.0, 1.0)), # Test translate y downward. ((0, 0), 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, -10.0, (1.0, -9.0)), # Test translate x to the right. ((0, 0), 1.0, 1.0, 0.0, 1.0, 1.0, 10.0, 0.0, (11.0, 1.0)), # Test translate y upward.
((0, 0), 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 10.0, (1.0, 11.0)), ] @pytest.mark.parametrize( "origin, xs, ys, angle, scale_x, scale_y, tx, ty, expected_point", transform_tests ) def test_transform_prior( _priors_generator, origin, xs, ys, angle, scale_x, scale_y, tx, ty, expected_point ): """Test the rotation, translation and scaling functionality.""" transformed_point = _priors_generator._transform_prior( origin, xs, ys, angle, scale_x, scale_y, tx, ty ) with tf.compat.v1.Session() as sess: actual_point = sess.run(tf.reshape(transformed_point, [-1])) np.testing.assert_almost_equal(actual_point, expected_point, decimal=4) def test_get_prior_locations(_priors_generator): """Test locations of priors encoded correctly.""" receptive_field_x = np.floor(_IMAGE_WIDTH / 5.0) receptive_field_y = np.floor(_IMAGE_HEIGHT / 3.0) prior_locations = _priors_generator._get_prior_locations( receptive_field_x, receptive_field_y, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) assert prior_locations[0].shape == (3, 5) assert prior_locations[0][0][0] == (_IMAGE_WIDTH / 5.0) / 2.0 assert prior_locations[1][0][0] == (_IMAGE_HEIGHT / 3.0) / 2.0 # Test point priors. point_prior_tests = [ # One prior. (100.0, 200.0, [100.0, 200.0, 100.0, 200.0, 100.0, 200.0, 100.0, 200.0]) ] @pytest.mark.parametrize("tx, ty, expected_priors", point_prior_tests) def test_generate_point_priors(_priors_generator, tx, ty, expected_priors): """Test that correct number and shape of point priors are created.""" priors = _priors_generator._generate_point_priors( tx, ty, image_width=1, image_height=1 ) with tf.compat.v1.Session() as sess: actual_priors = sess.run(tf.reshape(priors, [-1])) np.testing.assert_array_almost_equal(actual_priors, expected_priors, decimal=4) # Test one and three linear priors. linear_prior_tests = [ # One prior. ( [0.5 * np.pi], 1.0, 1.0, 100.0, 100.0, [0.1562, 0.2004, 0.0521, 0.2004, 0.1562, 0.1964, 0.0521, 0.1964], ), # Three priors, invalid angles. ([0.0, -0.5 * np.pi, 1.5 * np.pi], 1.0, 1.0, 100.0, 100.0, []), # Three priors. ( [0.25 * np.pi, 0.5 * np.pi, 0.75 * np.pi], 1.0, 1.0, 100.0, 100.0, [ 0.1417, 0.1297, 0.0681, 0.2700, 0.1403, 0.1269, 0.0666, 0.2672, 0.1562, 0.2004, 0.0521, 0.2004, 0.1562, 0.1964, 0.0521, 0.1964, 0.1403, 0.2700, 0.0666, 0.1297, 0.1417, 0.2672, 0.0681, 0.1269, ], ), ] @pytest.mark.parametrize( "angles, scale_x, scale_y, tx, ty, expected_priors", linear_prior_tests ) def test_generate_linear_priors( _priors_generator, angles, scale_x, scale_y, tx, ty, expected_priors ): """Test that correct number and shape of linear priors are created.""" if all(a >= 0.0 for a in angles) and all(a <= np.pi for a in angles): priors = _priors_generator._generate_linear_priors( angles, scale_x, scale_y, tx, ty, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) with tf.compat.v1.Session() as sess: actual_priors = sess.run(tf.reshape(priors, [-1])) np.testing.assert_array_almost_equal(actual_priors, expected_priors, decimal=4) else: with pytest.raises(Exception): priors = _priors_generator._generate_linear_priors( angles, scale_x, scale_y, tx, ty, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) # Test two different feature map sizes and more than one feature map. prior_tests = [ # One feature map. ([(5, 3)], 540), # Second feature map. ([(10, 6)], 2056), # Two feature maps. 
([(2, 1), (5, 3)], 612), ] @pytest.mark.parametrize("feature_map_sizes, expected_prior_points", prior_tests) def test_get_priors(_priors_generator, feature_map_sizes, expected_prior_points): """Test that the correct priors are generated.""" priors = _priors_generator._get_priors( feature_map_sizes, _IMAGE_HEIGHT, _IMAGE_WIDTH ) with tf.compat.v1.Session() as sess: prior_x, prior_y = sess.run(tf.split(priors, num_or_size_splits=2, axis=1)) assert len(prior_x) == expected_prior_points assert len(prior_y) == expected_prior_points # Test two different feature map sizes and more than one feature map. nprior_tests = [ # One feature map. ([(5, 3)], 135), # Second feature map. ([(10, 6)], 540), # Two feature maps. ([(2, 1), (5, 3)], 153), ] @pytest.mark.parametrize("feature_map_sizes, expected_nall_priors", nprior_tests) def test_get_nall_priors(_priors_generator, feature_map_sizes, expected_nall_priors): """Test that the correct priors are generated.""" nall_priors = _priors_generator._get_nall_priors(feature_map_sizes) assert nall_priors == expected_nall_priors # Test zero and a combination of npriors. prior_nprior_tests = [ # Only point priors. (1, 0), # Only linear priors. (0, 8), # Linear and point priors. (1, 8), ] @pytest.mark.parametrize("npoint_priors, nlinear_priors", prior_nprior_tests) def test_get_priors_npriors(_priors_generator, npoint_priors, nlinear_priors): """Test that the correct priors are generated.""" nprior_locations = sum([np.prod(fmaps) for fmaps in _FEATURE_MAP_SIZES]) _priors_generator.npoint_priors = npoint_priors _priors_generator.nlinear_priors = nlinear_priors priors = _priors_generator._get_priors( _FEATURE_MAP_SIZES, _IMAGE_HEIGHT, _IMAGE_WIDTH ) with tf.compat.v1.Session() as sess: prior_x, prior_y = sess.run(tf.split(priors, num_or_size_splits=2, axis=1)) assert len(prior_x) == ( _COORDINATES_PER_POINT / 2 ) * _POINTS_PER_PRIOR * nprior_locations * (npoint_priors + nlinear_priors) assert len(prior_y) == ( _COORDINATES_PER_POINT / 2 ) * _POINTS_PER_PRIOR * nprior_locations * (npoint_priors + nlinear_priors) def test_raise_error_with_negative_npoints_priors(): with pytest.raises(ValueError) as exception: PriorsGenerator( npoint_priors=-1, nlinear_priors=_NLINEAR_PRIORS, points_per_prior=_POINTS_PER_PRIOR, prior_threshold=_PRIOR_THRESHOLD, feature_map_sizes=_FEATURE_MAP_SIZES, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) assert str(exception.value) == "npoints_priors must be positive, it is -1." def test_raise_error_with_negative_nlinear_priors(): with pytest.raises(ValueError) as exception: PriorsGenerator( npoint_priors=_NPOINT_PRIORS, nlinear_priors=-1, points_per_prior=_POINTS_PER_PRIOR, prior_threshold=_PRIOR_THRESHOLD, feature_map_sizes=_FEATURE_MAP_SIZES, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) assert str(exception.value) == "nlinear_priors must be positive, it is -1." def test_raise_error_with_zero_npriors(): with pytest.raises(ValueError) as exception: PriorsGenerator( npoint_priors=0, nlinear_priors=0, points_per_prior=_POINTS_PER_PRIOR, prior_threshold=_PRIOR_THRESHOLD, feature_map_sizes=_FEATURE_MAP_SIZES, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) assert str(exception.value) == "npriors must be > 0, it is 0." 
def test_raise_error_with_zero_points_per_prior(): with pytest.raises(ValueError) as exception: PriorsGenerator( npoint_priors=_NPOINT_PRIORS, nlinear_priors=_NLINEAR_PRIORS, points_per_prior=0, prior_threshold=_PRIOR_THRESHOLD, feature_map_sizes=_FEATURE_MAP_SIZES, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) assert str(exception.value) == "points_per_prior must be positive, not 0." @mock.patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.priors_generator_test.\ PriorsGenerator._get_nall_priors", return_value=0, ) def test_raise_error_with_zero_nall_priors(mocked_get_nall_priors): with pytest.raises(ValueError) as exception: PriorsGenerator( npoint_priors=_NPOINT_PRIORS, nlinear_priors=_NLINEAR_PRIORS, points_per_prior=_POINTS_PER_PRIOR, prior_threshold=_PRIOR_THRESHOLD, feature_map_sizes=_FEATURE_MAP_SIZES, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) assert str(exception.value) == "There must be at least one prior, instead 0." @mock.patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.priors_generator_test.\ PriorsGenerator._get_priors", return_value=None, ) def test_raise_error_with_no_priors(mocked_get_priors): with pytest.raises(ValueError) as exception: PriorsGenerator( npoint_priors=_NPOINT_PRIORS, nlinear_priors=_NLINEAR_PRIORS, points_per_prior=_POINTS_PER_PRIOR, prior_threshold=_PRIOR_THRESHOLD, feature_map_sizes=_FEATURE_MAP_SIZES, image_height=_IMAGE_HEIGHT, image_width=_IMAGE_WIDTH, ) assert str(exception.value) == "There is not any prior set."
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/priors_generator_test.py
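As a quick sanity check, the first two rows of the transform_tests table above can be reproduced by hand with the same rotation formula PriorsGenerator._transform_prior uses; this is a standalone NumPy restatement for illustration.

import numpy as np

def rotate(origin, x, y, angle):
    # Same 2D rotation about `origin` as _transform_prior, without scale/translation.
    ox, oy = origin
    return (ox + np.cos(angle) * (x - ox) - np.sin(angle) * (y - oy),
            oy + np.sin(angle) * (x - ox) + np.cos(angle) * (y - oy))

# Rotating (1, 0) by -pi/2 about the origin lands on (0, -1); by +pi/2 on (0, 1).
assert np.allclose(rotate((0, 0), 1.0, 0.0, -np.pi / 2), (0.0, -1.0))
assert np.allclose(rotate((0, 0), 1.0, 0.0, np.pi / 2), (0.0, 1.0))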
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for applying random shear augmentations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import ( TransformProcessor, ) from nvidia_tao_tf1.core.coreobject import save_args from nvidia_tao_tf1.core.processors import RandomShear as _RandomShear class RandomShear(TransformProcessor): """Augmentation processor that randomly shears images and labels.""" @save_args def __init__(self, max_ratio_x, max_ratio_y, probability): """Construct a RandomShear processor. Args: max_ratio_x (float): Maximum shear ratio in horizontal direction. max_ratio_y (float): Maximum shear ratio in vertical direction. probability (float): Probability at which shearing is performed. """ super(RandomShear, self).__init__( _RandomShear( max_ratio_x=max_ratio_x, max_ratio_y=max_ratio_y, probability=probability, ) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_shear.py
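A minimal usage sketch for the processor above; the ratio and probability values are illustrative, and the processor is assumed to be driven by a dataloader that calls process() on each example:

from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_shear import RandomShear

# Shear by up to 10% horizontally and 5% vertically on half of the examples.
shear = RandomShear(max_ratio_x=0.1, max_ratio_y=0.05, probability=0.5)

# TransformProcessor subclasses are chained by the dataloader, which calls
# shear.process(example) on each example flowing through the pipeline.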
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomContrast processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from mock import patch from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_contrast import ( RandomContrast, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object from modulus.processors.augment.color import random_contrast_matrix class TestRandomContrast(ProcessorTestCase): @patch( "modulus.processors.augment.color.random_contrast_matrix", side_effect=random_contrast_matrix, ) def test_delegates_to_random_contrast_matrix(self, spied_random_contrast_matrix): example = self.make_example_128x240() augmentation = RandomContrast(scale_max=180.0, center=0.0) augmentation.process(example) spied_random_contrast_matrix.assert_called_with( scale_max=180.0, center=0.0, batch_size=None ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomContrast(scale_max=180.0, center=0.0) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_contrast_test.py
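The test above uses a spy pattern: patch() swaps in a mock whose side_effect is the original function, so behaviour is unchanged while call arguments are recorded. The same idea on a standard-library function, as a self-contained sketch:

import math
from unittest import mock

# side_effect delegates to the untouched function (captured before patching),
# so behaviour is unchanged while the mock records how it was called.
with mock.patch("math.sqrt", side_effect=math.sqrt) as spied_sqrt:
    assert math.sqrt(9.0) == 3.0
    spied_sqrt.assert_called_with(9.0)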
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for applying random flip augmentations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import (
    TransformProcessor,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import RandomFlip as _RandomFlip


class RandomFlip(TransformProcessor):
    """Augmentation processor that randomly flips images and labels.

    Note that the default value of horizontal_probability differs from
    vertical_probability for compatibility with networks that currently use this
    processor and assume vertical_probability is 0.
    """

    @save_args
    def __init__(self, horizontal_probability=1.0, vertical_probability=0.0):
        """Construct a RandomFlip processor.

        Args:
            horizontal_probability (float): Probability between 0 to 1 at which a
                left-right flip occurs. Defaults to 1.0.
            vertical_probability (float): Probability between 0 to 1 at which a
                top-bottom flip occurs. Defaults to 0.0.
        """
        super(RandomFlip, self).__init__(
            _RandomFlip(horizontal_probability, vertical_probability)
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_flip.py
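A short usage sketch illustrating the asymmetric defaults called out in the class docstring; the probability values are illustrative:

from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_flip import RandomFlip

# Default behaviour: always flip left-right, never top-bottom (the
# backward-compatible defaults described in the class docstring).
lr_only = RandomFlip()

# Explicitly enable both axes, each at 50%.
both_axes = RandomFlip(horizontal_probability=0.5, vertical_probability=0.5)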
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test for SourceWeightSQLFrameProcessor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from parameterized import parameterized import tensorflow as tf import modulus from nvidia_tao_tf1.blocks.multi_source_loader.processors.source_weight_frame import ( SourceWeightSQLFrameProcessor, ) class TestSourceWeightSQLFrameProcessor(tf.test.TestCase): """Test for SourceWeightSQLFrameProcessor.""" @parameterized.expand([(1.0, 1.0), (2.0, 2.0)]) def test_process(self, source_weight, expected_weight): row = [ "person", [[50.5, 688.83], [164.28, 766.8]], 1, 0.0, # This is the field added by add_field ] label_indices = {"BOX": {"is_cvip": 2, "vertices": 1, "classifier": 0}} instances = {"source_weight": 3} example = modulus.types.types.Example(instances=instances, labels=label_indices) source_weight_processor = SourceWeightSQLFrameProcessor(source_weight) processed_row = source_weight_processor.map(example, row) self.assertEqual(expected_weight, processed_row[instances["source_weight"]])
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/source_weight_frame_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for class_attribute_mapper.py""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.class_attribute_mapper import ( ClassAttributeMapper, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import Polygon2DLabel import nvidia_tao_tf1.blocks.multi_source_loader.types.test_fixtures as fixtures from nvidia_tao_tf1.core.coreobject import deserialize_tao_object classes = [ # Examples [[["path1"], ["path2"]]], # Frames # Shapes # Tags [[["path1"], ["path1"]]], # Frames # Shapes # Tags [[["path0"]]], # Frames # Shapes # Tags [[[]]], # Frames # Shapes # Tags ] attributes = [ # Examples [ # Frames [["LefT EdGe", "Left Exit"], ["Left edge", "Left exit"]] # Shapes # Tags ], [ # Frames [ # Shapes ["Left Edge", "Left Exit", "different attribute"], # Tags ["Left Edge", "Left Exit"], ] ], [[["Left Edge", "Left Exit"]]], # Frames # Shapes # Tags [[["Left Edge"]]], # Frames # Shapes # Tags ] class_attribute_mapping = [ { "match_any_class": ["path 0", "path0", "path zero"], "match_any_attribute": ["Left Edge", "Left Exit"], "class_name": "path 0", "class_id": 0, }, { "match_any_class": ["path 1", "path1", "path one"], "match_all_attributes": ["Left Edge", "Left Exit"], "class_name": "path 1", "class_id": 1, }, { "match_any_class": ["path 1", "path1", "path one"], "match_all_attributes": ["Left Edge", "Left Exit"], "match_all_attributes_allow_others": True, "class_name": "others allowed", "class_id": 2, }, { "match_any_class": [], "match_all_attributes": ["Left Edge"], "match_all_attributes_allow_others": True, "class_name": "others allowed", "class_id": 3, }, ] class_only_mapping = [ { "match_any_class": ["path 0", "path0", "path zero"], "class_name": "path 0", "class_id": 0, }, { "match_any_class": ["path 1", "path1", "path one"], "class_name": "path 1", "class_id": 1, }, ] attribute_mapping = {"Left Edge": 1, "Left Exit": 2} class TestClassAttributeMapper(tf.test.TestCase): def test_class_attribute_mapping(self): with self.cached_session() as sess: mapper = ClassAttributeMapper( class_attribute_mapping, "Default", -1, attribute_mapping, -1 ) polygon_2d_label = Polygon2DLabel( vertices=None, # vertices currently don't matter classes=fixtures.make_tags(classes), attributes=fixtures.make_tags(attributes), ) mapped_polygon_2d_label = mapper(polygon_2d_label) sess.run(tf.compat.v1.tables_initializer()) self.assertAllEqual( [1, -1, 2, 1, 0, 3], mapped_polygon_2d_label.classes.values ) self.assertAllEqual( [1, 2, 1, 2, -1, 1, 2, 1, 2, 1, 2, 1], mapped_polygon_2d_label.attributes.values, ) # Empty attributes arrays for cases like polenet that only have classes def test_empty_attributes_mapping(self): with self.cached_session() as sess: empty_attributes = tf.SparseTensor( indices=tf.zeros((0, 4), tf.int64), values=tf.constant([], dtype=tf.string), 
dense_shape=tf.constant((0, 0, 0, 0), dtype=tf.int64), ) polygon_2d_label = Polygon2DLabel( vertices=None, # vertices currently don't matter classes=fixtures.make_tags(classes), attributes=empty_attributes, ) mapper = ClassAttributeMapper( class_attribute_mapping, "Default", -1, attribute_mapping, -1 ) mapped_polygon_2d_label = mapper(polygon_2d_label) sess.run(tf.compat.v1.tables_initializer()) self.assertAllEqual( [-1, -1, -1, -1, -1], mapped_polygon_2d_label.classes.values ) def test_class_only_mapping(self): with self.cached_session() as sess: mapper = ClassAttributeMapper(class_only_mapping, "Default", -1, {}, -1) polygon_2d_label = Polygon2DLabel( vertices=None, # vertices currently don't matter classes=fixtures.make_tags(classes), attributes=fixtures.make_tags(attributes), ) mapped_polygon_2d_label = mapper(polygon_2d_label) sess.run(tf.compat.v1.tables_initializer()) self.assertAllEqual( [1, -1, 1, 1, 0, -1], mapped_polygon_2d_label.classes.values ) self.assertAllEqual( [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], mapped_polygon_2d_label.attributes.values, ) def test_class_attribute_mapping_removed_attributes(self): with self.cached_session() as sess: updated_class_attribute_mapping = [] for mapping in class_attribute_mapping: updated_mapping = mapping.copy() updated_mapping.update({"remove_matched_attributes": True}) updated_class_attribute_mapping.append(updated_mapping) mapper = ClassAttributeMapper( updated_class_attribute_mapping, "Default", -1, attribute_mapping, -1 ) polygon_2d_label = Polygon2DLabel( vertices=None, # vertices currently don't matter classes=fixtures.make_tags(classes), attributes=fixtures.make_tags(attributes), ) mapped_polygon_2d_label = mapper(polygon_2d_label) sess.run(tf.compat.v1.tables_initializer()) self.assertAllEqual( [1, -1, 2, 1, 0, 3], mapped_polygon_2d_label.classes.values ) self.assertAllEqual([1, 2, -1], mapped_polygon_2d_label.attributes.values) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" mapper = ClassAttributeMapper( class_attribute_mapping, "Default", -1, attribute_mapping, -1 ) mapper_dict = mapper.serialize() deserialized_mapper = deserialize_tao_object(mapper_dict) self.assertAllEqual( mapper._attribute_mappings, deserialized_mapper._attribute_mappings ) self.assertAllEqual( mapper._default_attribute_id, deserialized_mapper._default_attribute_id ) self.assertAllEqual( mapper._default_class_name, deserialized_mapper._default_class_name )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/class_attribute_mapper_test.py
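The fixtures above exercise three matching modes: match_any_attribute (any listed attribute suffices), match_all_attributes (the tag set must match exactly), and match_all_attributes_allow_others (the listed attributes must be present, extras tolerated). The sketch below reconstructs that rule in pure Python from the test expectations, not from the op's source; case-insensitive comparison, first-match-wins ordering, and "an empty match_any_class only matches untagged shapes" are all inferences from the fixtures:

MAPPINGS = [
    {"match_any_class": ["path0"], "match_any_attribute": ["Left Edge", "Left Exit"],
     "class_id": 0},
    {"match_any_class": ["path1"], "match_all_attributes": ["Left Edge", "Left Exit"],
     "class_id": 1},
    {"match_any_class": ["path1"], "match_all_attributes": ["Left Edge", "Left Exit"],
     "match_all_attributes_allow_others": True, "class_id": 2},
    {"match_any_class": [], "match_all_attributes": ["Left Edge"],
     "match_all_attributes_allow_others": True, "class_id": 3},
]

def map_class(mappings, class_names, attributes, default_id=-1):
    """Return the class_id of the first mapping whose conditions all hold."""
    have = {c.lower() for c in class_names}
    attrs = {a.lower() for a in attributes}
    for m in mappings:
        wanted_classes = {c.lower() for c in m.get("match_any_class", [])}
        # An empty match_any_class only matches shapes without class tags.
        if (wanted_classes and not wanted_classes & have) or (
            not wanted_classes and have
        ):
            continue
        if "match_any_attribute" in m and not (
            {a.lower() for a in m["match_any_attribute"]} & attrs
        ):
            continue
        if "match_all_attributes" in m:
            wanted = {a.lower() for a in m["match_all_attributes"]}
            allow = m.get("match_all_attributes_allow_others", False)
            if not (wanted <= attrs if allow else wanted == attrs):
                continue
        return m["class_id"]
    return default_id

# Mirrors the [1, -1, 2, ...] expectations in test_class_attribute_mapping.
assert map_class(MAPPINGS, ["path1"], ["LefT EdGe", "Left Exit"]) == 1
assert map_class(MAPPINGS, ["path2"], ["Left edge", "Left exit"]) == -1
assert map_class(
    MAPPINGS, ["path1"], ["Left Edge", "Left Exit", "different attribute"]
) == 2
assert map_class(MAPPINGS, [], ["Left Edge"]) == 3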
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomZoom processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from mock import patch from parameterized import parameterized from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_zoom import ( RandomZoom, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object from modulus.processors.augment.spatial import random_zoom_matrix def _esc(message): """Escape passed in string for regular expressions.""" return re.escape(message) class TestRandomZoom(ProcessorTestCase): @parameterized.expand( [ [ -0.1, _esc( "RandomZoom.probability (-0.1) is not within the range [0.0, 1.0]." ), ], [ 1.1, _esc( "RandomZoom.probability (1.1) is not within the range [0.0, 1.0]." ), ], ] ) def test_raises_on_invalid_probability(self, probability, message): with self.assertRaisesRegexp(ValueError, message): RandomZoom(probability=probability) @patch( "modulus.processors.augment.spatial.random_zoom_matrix", side_effect=random_zoom_matrix, ) def test_delegates_to_random_zoom_matrix(self, spied_random_zoom_matrix): example = self.make_example_128x240() augmentation = RandomZoom(ratio_min=0.1, ratio_max=2.0, probability=1.0) augmentation.process(example) spied_random_zoom_matrix.assert_called_with( ratio_min=0.1, ratio_max=2.0, width=240, height=128, batch_size=None ) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomZoom(ratio_min=1, ratio_max=5, probability=1.0) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_zoom_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomRotation processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from mock import Mock, patch import pytest from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_rotation import ( RandomRotation, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object class TestRandomRotation(ProcessorTestCase): @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.random_rotation._RandomRotation" ) def test_forwards_constructor_arguments_to_transform( self, mocked_rotation_transform ): min_angle = Mock() max_angle = Mock() probability = Mock() RandomRotation( min_angle=min_angle, max_angle=max_angle, probability=probability ) mocked_rotation_transform.assert_called_with( min_angle=min_angle, max_angle=max_angle, probability=probability ) @patch( "nvidia_tao_tf1.blocks.multi_source_loader.processors.random_rotation._RandomRotation" ) def test_forwards_transform_value_errors_to_caller(self, mocked_rotation_transform): def raise_value_error(**kwargs): raise ValueError("test error") mocked_rotation_transform.side_effect = raise_value_error with pytest.raises(ValueError) as exc: RandomRotation(min_angle=4, max_angle=7, probability=1.0) assert "test error" in str(exc) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" augmentation = RandomRotation(min_angle=4, max_angle=7, probability=1.0) augmentation_dict = augmentation.serialize() deserialized_augmentation = deserialize_tao_object(augmentation_dict) self.assertEqual( str(augmentation._transform), str(deserialized_augmentation._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_rotation_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for converting polylines into polygons."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.types.coordinates2d import (
    Coordinates2D,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types.polygon2d_label import (
    Polygon2DLabel,
)
from nvidia_tao_tf1.core.coreobject import TAOObject, save_args
from nvidia_tao_tf1.core.processors.processors import is_sparse, load_custom_tf_op

logger = logging.getLogger(__name__)


class MultiplePolylineToPolygon(TAOObject):
    """Processor that converts multiple polylines to polygons to be later rasterized."""

    @save_args
    def __init__(self, attribute_id_list, class_id_list):
        """
        Construct a MultiplePolylineToPolygon processor.

        Args:
            attribute_id_list: List of attribute ids that need to be mapped to class ids.
                (The list must contain unique values, as it is used as the key of a
                one-to-one mapping.)
            class_id_list: List of class ids that the attribute ids are mapped to.
        """
        super(MultiplePolylineToPolygon, self).__init__()
        self._attribute_id_list = attribute_id_list
        self._class_id_list = class_id_list

    def process(self, labels2d):
        """
        Combine polylines into polygons based on attribute ID.

        These polygons are consumed by the polygon rasterizer.

        Args:
            labels2d (Polygon2DLabel): A label containing 2D polygons/polylines and their
                associated classes and attributes. The first two dimensions of each tensor
                that this structure contains should be batch/example followed by a
                frame/time dimension. The rest of the dimensions encode type specific
                information. See Polygon2DLabel documentation for details.

        Returns:
            (Polygon2DLabel): A label with the same format as before, where components
                matching the configured attribute ids have been treated as polylines and
                converted to polygons.
        """
        logger.info("Building multiple polyline to polygon conversion.")
        self.labels2d = labels2d
        self.polygons = labels2d.vertices.coordinates
        self.class_ids_per_polygon = labels2d.classes
        self.attributes_per_polygon = labels2d.attributes

        assert is_sparse(self.polygons)
        assert is_sparse(self.class_ids_per_polygon)
        assert is_sparse(self.attributes_per_polygon)

        output_labels = tf.cond(
            pred=tf.equal(tf.size(input=self.polygons.indices), 0),
            true_fn=self._no_op,
            false_fn=self._apply_op,
        )

        return output_labels

    def _no_op(self):
        """
        Return the input labels unchanged.

        Returns:
            (Polygon2DLabel): The input label, passed through without conversion.
        """
        return Polygon2DLabel(
            vertices=Coordinates2D(
                coordinates=self.polygons,
                canvas_shape=self.labels2d.vertices.canvas_shape,
            ),
            classes=self.class_ids_per_polygon,
            attributes=self.attributes_per_polygon,
        )

    def _apply_op(self):
        """
        Apply Multiple Polyline to Polygon Op and return labels.
Returns: (Polygon2DLabel): A label with the same format as before, with components with the label to be converted treated as polylines and then converted to polygons. """ op = load_custom_tf_op("op_multiple_polyline_to_polygon.so", __file__) attribute_id_list = tf.constant(self._attribute_id_list, dtype=tf.int32) class_id_list = tf.constant(self._class_id_list, dtype=tf.int32) ( op_indices, op_values, op_dense_shape, op_class_ids_indices, op_class_ids_values, op_class_ids_shape, ) = op.multiple_polyline_to_polygon( polygon_indices=self.polygons.indices, polygon_values=self.polygons.values, polygon_dense_shape=self.polygons.dense_shape, attribute_indices=self.attributes_per_polygon.indices, attribute_values=self.attributes_per_polygon.values, attribute_shape=self.attributes_per_polygon.dense_shape, attribute_id_list=attribute_id_list, class_id_list=class_id_list, ) converted_polygons = tf.SparseTensor( indices=op_indices, values=op_values, dense_shape=op_dense_shape ) converted_class_ids = tf.SparseTensor( indices=op_class_ids_indices, values=op_class_ids_values, dense_shape=op_class_ids_shape, ) return Polygon2DLabel( vertices=Coordinates2D( coordinates=converted_polygons, canvas_shape=self.labels2d.vertices.canvas_shape, ), classes=converted_class_ids, attributes=self.attributes_per_polygon, )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/multiple_polyline_to_polygon.py
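The attribute-to-class correspondence handed to the custom op is positional: attribute_id_list[i] maps to class_id_list[i], which is why the constructor docstring requires attribute_id_list to be unique. A tiny sketch with illustrative ids:

# Illustrative ids only; real values come from the model's label spec.
attribute_id_list = [7, 9]
class_id_list = [0, 1]

# The op treats the two lists as a one-to-one mapping, which is why
# attribute_id_list must contain unique values.
attribute_to_class = dict(zip(attribute_id_list, class_id_list))
assert attribute_to_class[9] == 1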
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the gaussian_kernel processor."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import pytest
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.gaussian_kernel import (
    gaussian_kernel,
)


@pytest.mark.parametrize(
    "size, mean, stddev, expected_value",
    [
        [2, 0.0, None, [0.7655643, 0.23443568]],
        [2, 0.0, 1.0, [0.62245935, 0.37754068]],
        [1, 0.0, None, [1.0]],
        [3, 0.0, None, [0.23899426, 0.52201146, 0.23899426]],
        [3, 0.0, 5.0, [0.33110374, 0.3377925, 0.33110374]],
    ],
)
def test_gaussian_kernel(size, mean, stddev, expected_value):
    value = gaussian_kernel(size, mean, stddev)
    with tf.compat.v1.Session() as sess:
        np_value = sess.run(value)
        np.testing.assert_array_equal(
            np_value.transpose(1, 0),
            np.expand_dims(np.asarray(expected_value, np.float32), axis=0),
        )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/gaussian_kernel_test.py
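The expected values in this test are consistent with a normalized 1-D Gaussian sampled at integer offsets x = i - (size - 1) // 2, with a default stddev following OpenCV's rule of thumb 0.3 * ((size - 1) * 0.5 - 1) + 0.8 when None is passed. The NumPy reference below is reconstructed from those expectations and is an assumption about the implementation, not a copy of it (it also returns a flat vector, whereas the processor returns a column that the test transposes):

import numpy as np

def gaussian_kernel_reference(size, mean=0.0, stddev=None):
    """Normalized 1-D Gaussian, reconstructed from the expected test values."""
    if stddev is None:
        stddev = 0.3 * ((size - 1) * 0.5 - 1) + 0.8  # OpenCV-style default (assumed)
    x = np.arange(size, dtype=np.float32) - (size - 1) // 2
    g = np.exp(-((x - mean) ** 2) / (2.0 * stddev ** 2))
    return g / g.sum()

np.testing.assert_allclose(
    gaussian_kernel_reference(3), [0.23899426, 0.52201146, 0.23899426], atol=1e-6
)
np.testing.assert_allclose(
    gaussian_kernel_reference(2), [0.7655643, 0.23443568], atol=1e-6
)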
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor for applying spatial/temporal/color transformations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_LAST from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import ( Processor, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Example, FEATURE_CAMERA, LABEL_MAP, PolygonLabel, SequenceExample, ) from nvidia_tao_tf1.core.processors import ColorTransform, PolygonTransform, SpatialTransform from nvidia_tao_tf1.core.processors import Processor as ModulusProcessor from nvidia_tao_tf1.core.types import Canvas2D, Transform class TransformProcessor(Processor): """Processor that applies spatial and color transformations.""" def __init__(self, transform): """ Construct processor that uses a transformation. Args: transform (Transform): Transform to apply. """ super(TransformProcessor, self).__init__() if transform is None: raise ValueError("Transform should not be None") self._transform = transform self._is_built = False # Default to channels last for backward compatibility of unit tests. self.data_format = CHANNELS_LAST def _build(self, data_format): self._color_transform = ColorTransform( min_clip=0.0, max_clip=1.0, data_format=str(data_format) ) self._polygon_transform = PolygonTransform() self._spatial_transform = SpatialTransform( method="bilinear", background_value=0.5, data_format=str(data_format), verbose=False, ) self._is_built = True @property def supported_formats(self): """Data formats supported by this processor. Returns: data_formats (list of 'DataFormat'): Input data formats that this processor supports. """ return [CHANNELS_FIRST, CHANNELS_LAST] def can_compose(self, other): """ Determine whether two processors can be composed into a single one. Args: other (Processor): Other processor instance. Returns: (Boolean): True if the other processor is also a TransformProcessor. """ return isinstance(other, TransformProcessor) def compose(self, other): """Compose two TransformProcessors into a single one.""" if not isinstance(other, TransformProcessor): raise TypeError( "Tried to compose TransformProcessor with type: {}".format(type(other)) ) composite = CompositeTransform([self._transform, other._transform]) return TransformProcessor(transform=composite) def process(self, example): """ Process examples by applying transformations in sequence.. Args: example (Example): Examples to process in format specified by data_format. Returns: example (TransformedExample): Example with all transformations applied. """ def _get_shape_as_list(tensor): # Try static shape inference first. If it fails, use run time shape. 
shape = tensor.shape.as_list() runtime_shape = tf.shape(input=tensor) for i, dim in enumerate(shape): if dim is None: shape[i] = runtime_shape[i] return shape if not self._is_built: self._build(self.data_format) if isinstance(example, SequenceExample): # Depending on where in the data loader pipeline TransformProcessor is called, # feature_camera can be either Images2D or Images2DReference. Both namedtuples # have canvas_shape member, so we can use the same code for both. feature_camera = example.instances[FEATURE_CAMERA] # Get shapes (static shape if known, run time shape otherwise). canvas_height_shape = _get_shape_as_list(feature_camera.canvas_shape.height) canvas_width_shape = _get_shape_as_list(feature_camera.canvas_shape.width) # Check the number of dimensions: # 1: Spatial dimension only. # 2: Sequence and spatial dimensions. # 3: Batch, sequence, and spatial dimensions. rank = len(canvas_height_shape) assert 1 <= rank <= 3 # Infer batch shape, width and height. batch_shape = [canvas_height_shape[0]] if rank == 3 else None height = canvas_height_shape[-1] width = canvas_width_shape[-1] identity_transformation = Transform( canvas_shape=Canvas2D(height=height, width=width), color_transform_matrix=tf.eye( 4, batch_shape=batch_shape, dtype=tf.float32 ), spatial_transform_matrix=tf.eye( 3, batch_shape=batch_shape, dtype=tf.float32 ), ) transformation = self._transform(identity_transformation) # NOTE: We're encoding the canvas height as a vector of shape [Height] and width as # a vector of shape [Width]. This is done so that we can use TF static shapes to # pass shape information here as Python values (i.e. at graph construction time.) # Being able to set the shape of the transformed images here enables us to decouple # the dataloader and the estimator/model. The shape of all images coming out of # the dataloader will be fully defined at graph construction time. # Replace canvas width and height by transformed values, propagate the other # dimensions as is. canvas_height_shape[-1] = transformation.canvas_shape.height canvas_width_shape[-1] = transformation.canvas_shape.width transformation = Transform( canvas_shape=Canvas2D( height=tf.zeros(canvas_height_shape), width=tf.zeros(canvas_width_shape), ), color_transform_matrix=transformation.color_transform_matrix, spatial_transform_matrix=transformation.spatial_transform_matrix, ) return example.transform(transformation) # Legacy LaneNet dataloader expect transformations to be applied # here. TODO(vkallioniemi): remove this functionality once we delete # the old dataloader. 
axis = self.data_format.axis_4d frame = example.instances[FEATURE_CAMERA] input_shape = frame.get_shape().as_list() identity_transformation = Transform( canvas_shape=Canvas2D( height=input_shape[axis.row], width=input_shape[axis.column] ), color_transform_matrix=tf.eye(4, dtype=tf.float32), spatial_transform_matrix=tf.eye(3, dtype=tf.float32), ) transformation = self._transform(identity_transformation) return self._apply_transformation_to_example(transformation, example) def _color_transform_frames(self, frames, color_transform_matrix): """Return new frames by applying the color transform matrix against input frames.""" axis = self.data_format.axis_4d input_shape = tf.shape(input=frames) batch_size = input_shape[axis.batch] color_transform_matrices = tf.tile( tf.expand_dims(color_transform_matrix, axis=0), [batch_size, 1, 1] ) return self._color_transform(frames, ctms=color_transform_matrices) def _spatial_transform_frames(self, frames, spatial_transform_matrix, canvas_shape): """Return new frames by applying the spatial transform matrix against input frames.""" axis = self.data_format.axis_4d input_shape = tf.shape(input=frames) batch_size = input_shape[axis.batch] stms = tf.tile( tf.expand_dims(spatial_transform_matrix, axis=0), [batch_size, 1, 1] ) return self._spatial_transform( frames, stms=stms, shape=(int(canvas_shape.height), int(canvas_shape.width)) ) def _spatial_transform_label(self, label, spatial_transform_matrix): """Return new PolygonLabel by applying the spatial transform matrix against input label.""" transformed_polygons = self._polygon_transform( label.polygons, spatial_transform_matrix ) return PolygonLabel( polygons=transformed_polygons, vertices_per_polygon=label.vertices_per_polygon, class_ids_per_polygon=label.class_ids_per_polygon, attributes_per_polygon=label.attributes_per_polygon, polygons_per_image=label.polygons_per_image, attributes=label.attributes, attribute_count_per_polygon=label.attribute_count_per_polygon, ) def _apply_transformation_to_example(self, transformation, example): """Return new Example by applying the transformation against the input example.""" instances = example.instances labels = example.labels if FEATURE_CAMERA in example.instances: frames = example.instances[FEATURE_CAMERA] if transformation.spatial_transform_matrix is not None: frames = self._spatial_transform_frames( frames, transformation.spatial_transform_matrix, canvas_shape=transformation.canvas_shape, ) if transformation.color_transform_matrix is not None: frames = self._color_transform_frames( frames, transformation.color_transform_matrix ) instances[FEATURE_CAMERA] = frames if LABEL_MAP in example.labels: polygons = example.labels[LABEL_MAP] polygons = self._spatial_transform_label( polygons, transformation.spatial_transform_matrix ) labels[LABEL_MAP] = polygons return Example(instances=instances, labels=labels) class CompositeTransform(ModulusProcessor): """Sequence of transform processors composed into one.""" def __init__(self, transforms, **kwargs): """Construct a pipeline of transforms applied in sequence.""" super(CompositeTransform, self).__init__(**kwargs) self._transforms = transforms def __len__(self): """Return number of transforms this pipeline consists of.""" return len(self._transforms) def call(self, transformation): """Produce a transformation by applying all transforms in sequence.""" output = transformation for transform in self._transforms: output = transform(output) return output
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/transform_processor.py
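can_compose/compose above exist so a pipeline can fuse consecutive TransformProcessors into one CompositeTransform, paying for a single warp per frame instead of one per augmentation. A sketch of how a dataloader might fold a processor list, assuming every element follows the can_compose/compose protocol; this folding loop is illustrative, not the dataloader's actual code:

def fold_processors(processors):
    """Greedily compose adjacent processors that allow it."""
    folded = []
    for processor in processors:
        if folded and folded[-1].can_compose(processor):
            folded[-1] = folded[-1].compose(processor)
        else:
            folded.append(processor)
    return folded

# Two adjacent TransformProcessors collapse into one CompositeTransform-backed
# processor; anything that returns False from can_compose() breaks the chain.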
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for adjusting bounding box labels after cropping."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_FIRST
from nvidia_tao_tf1.blocks.multi_source_loader.data_format import CHANNELS_LAST
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import (
    Processor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import LABEL_OBJECT
from nvidia_tao_tf1.blocks.multi_source_loader.types import SequenceExample
from nvidia_tao_tf1.blocks.multi_source_loader.types import TransformedExample
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.types import Example


class BboxClipper(Processor):
    """Processor for adjusting bounding box labels after cropping.

    The following changes need to be made to bounding box labels:
        1) Labels completely out of the network's input are discarded.
        2) Labels that are 'half-in, half-out' should have their coordinates clipped
            to the input crop.
        3) Labels from 2) also should have their ``truncation_type`` updated accordingly.
    """

    @save_args
    def __init__(self, crop_left=0, crop_right=0, crop_top=0, crop_bottom=0):
        """Constructor.

        If all of the provided crop coordinates are 0, this processor will amount to
        a no-op.

        Args:
            crop_left (int): Left-most coordinate of the crop region.
            crop_right (int): Right-most coordinate of the crop region.
            crop_top (int): Top-most coordinate of the crop region.
            crop_bottom (int): Bottom-most coordinate of the crop region.

        Raises:
            ValueError: if crop_left > crop_right, or crop_top > crop_bottom.
        """
        super(BboxClipper, self).__init__()
        self._no_op = False
        all_crop_coords = {crop_left, crop_right, crop_top, crop_bottom}
        if all_crop_coords == {0}:
            self._no_op = True
        if not self._no_op:
            if crop_left >= crop_right or crop_top >= crop_bottom:
                raise ValueError(
                    "Provided crop coordinates result in a nonsensical crop region."
                )
        self._crop_left = float(crop_left)
        self._crop_right = float(crop_right)
        self._crop_bottom = float(crop_bottom)
        self._crop_top = float(crop_top)

    @property
    def supported_formats(self):
        """Data formats supported by this processor.

        Returns:
            data_formats (list of 'DataFormat'): Input data formats that this processor
                supports.
        """
        return [CHANNELS_FIRST, CHANNELS_LAST]

    def can_compose(self, other):
        """
        Determine whether two processors can be composed into a single one.

        Args:
            other (Processor): Other processor instance.

        Returns:
            (bool): True if this processor knows how to compose the other processor.
        """
        return False

    def compose(self, other):
        """Compose two processors into a single one."""
        raise NotImplementedError("BboxClipper.compose not supported.")

    def _get_indices_inside_crop(self, coords):
        """Get indices for bounding boxes that are at least partially inside the crop region.
Args: coords (tf.Tensor): Float tensor of shape (N, 4) where N is the number of bounding boxes. Each bbox has coordinates in the order [L, T, R, B]. Returns: valid_indices (tf.Tensor): Boolean tensor of shape (N,) indicating which bounding boxes in the input are at least partially inside the crop region. """ valid_indices = tf.ones(tf.shape(input=coords)[0], dtype=tf.bool) # False if left-most coordinate is to the right of the crop's region. valid_indices = tf.logical_and( valid_indices, tf.less(coords[:, 0], self._crop_right) ) # False if right-most coordinate is to the left of the crop's region. valid_indices = tf.logical_and( valid_indices, tf.greater(coords[:, 2], self._crop_left) ) # False if top-most coordinate is to the bottom of the crop's region. valid_indices = tf.logical_and( valid_indices, tf.less(coords[:, 1], self._crop_bottom) ) # False if bottom-most coordinate is to the top of the crop's region. valid_indices = tf.logical_and( valid_indices, tf.greater(coords[:, 3], self._crop_top) ) return valid_indices def _adjust_truncation_type(self, bbox_2d_label): """Adjust the truncation_type of a label if it is half-in, half-out of the crop. Args: bbox_2d_label (Bbox2DLabel): Label instance for which we will update the truncation_type. Returns: adjusted_label (Bbox2DLabel): Adjusted version of ``bbox_2d_label``. """ if isinstance(bbox_2d_label.truncation_type, tf.SparseTensor): new_coords = bbox_2d_label.vertices.coordinates.values # Get LTRB. x1, y1, x2, y2 = ( new_coords[::4], new_coords[1::4], new_coords[2::4], new_coords[3::4], ) left_most_in = tf.logical_and( tf.greater_equal(x1, self._crop_left), tf.less_equal(x1, self._crop_right), ) top_most_in = tf.logical_and( tf.greater_equal(y1, self._crop_top), tf.less_equal(y1, self._crop_bottom), ) right_most_in = tf.logical_and( tf.greater_equal(x2, self._crop_left), tf.less_equal(x2, self._crop_right), ) bottom_most_in = tf.logical_and( tf.greater_equal(y2, self._crop_top), tf.less_equal(y2, self._crop_bottom), ) # Needs adjustment if top-left corner is inside and bottom-right corner is outside, or # vice versa. half_in_half_out = tf.math.logical_xor( tf.logical_and(left_most_in, top_most_in), tf.logical_and(right_most_in, bottom_most_in), ) old_truncation_type = bbox_2d_label.truncation_type new_truncation_type_values = tf.cast( tf.logical_or( tf.cast( old_truncation_type.values, dtype=tf.bool ), # Why is this int32?? half_in_half_out, ), dtype=tf.int32, ) new_truncation_type = tf.SparseTensor( values=new_truncation_type_values, indices=old_truncation_type.indices, dense_shape=old_truncation_type.dense_shape, ) return bbox_2d_label._replace(truncation_type=new_truncation_type) # This corresponds to the case where the `truncation_type` field is not present. return bbox_2d_label def _clip_to_crop_region(self, bbox_2d_label): """Clip the coordinates to the crop region. Args: bbox_2d_label (Bbox2DLabel): Label instance to clip. Returns: clipped_label (Bbox2DLabel): Clipped version of ``bbox_2d_label``. 
""" input_coords = bbox_2d_label.vertices.coordinates.values xmin, ymin, xmax, ymax = ( input_coords[::4], input_coords[1::4], input_coords[2::4], input_coords[3::4], ) xmin = tf.clip_by_value(xmin, self._crop_left, self._crop_right) ymin = tf.clip_by_value(ymin, self._crop_top, self._crop_bottom) xmax = tf.clip_by_value(xmax, self._crop_left, self._crop_right) ymax = tf.clip_by_value(ymax, self._crop_top, self._crop_bottom) clipped_coords = tf.stack([xmin, ymin, xmax, ymax], axis=1) clipped_coords = tf.reshape(clipped_coords, [-1]) # Flatten. new_coords = tf.SparseTensor( values=clipped_coords, indices=bbox_2d_label.vertices.coordinates.indices, dense_shape=bbox_2d_label.vertices.coordinates.dense_shape, ) new_vertices = bbox_2d_label.vertices._replace(coordinates=new_coords) return bbox_2d_label._replace(vertices=new_vertices) def _adjust_bbox_2d_label(self, bbox_2d_label): """Apply adjustments due to cropping to bounding box labels. Args: bbox_2d_label (Bbox2DLabel): Label instance to apply the adjustments to. Returns: adjusted_label (Bbox2DLabel): Adjusted version of ``bbox_2d_label``. """ input_coords = bbox_2d_label.vertices.coordinates.values # For convenience, reshape input coordinates. input_coords = tf.reshape(input_coords, [-1, 4]) # Order is L, T, R, B. # First, figure out which ones are completely outside the crop. valid_indices = self._get_indices_inside_crop(input_coords) adjusted_label = bbox_2d_label.filter(valid_indices) # Now, determine, which ones need to have their coordinates clipped and truncation_type # adjusted. adjusted_label = self._adjust_truncation_type(adjusted_label) adjusted_label = self._clip_to_crop_region(adjusted_label) return adjusted_label def process(self, example): """ Process an example. Args: example (Example): Example with frames in format specified by data_format. Returns: (Example): Processed example. Raises: ValueError: Since this processor explicitly needs to be applied after transformations (if they are present), it does not accept TransformedExample. """ if isinstance(example, TransformedExample): raise ValueError( "BboxClipper should be applied on labels that have been transformed." ) if not self._no_op: if isinstance(example, (Example, SequenceExample)): if LABEL_OBJECT in example.labels: example.labels[LABEL_OBJECT] = self._adjust_bbox_2d_label( bbox_2d_label=example.labels[LABEL_OBJECT] ) return example
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/bbox_clipper.py
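The three adjustments named in the class docstring (discard fully-outside boxes, clip partially-outside ones, flag them as truncated) reduce to a few lines on dense arrays. Below is a NumPy sketch of the same geometry; the processor itself operates on the tf.SparseTensor fields of Bbox2DLabel, and its truncation test compares corners rather than detecting changed coordinates as done here:

import numpy as np

def clip_boxes(boxes, left, top, right, bottom):
    """boxes: (N, 4) float array in [L, T, R, B] order."""
    l, t, r, b = boxes.T
    # 1) Keep boxes at least partially inside the crop.
    keep = (l < right) & (r > left) & (t < bottom) & (b > top)
    boxes = boxes[keep]
    # 2) Clip surviving coordinates to the crop region.
    clipped = np.stack(
        [
            np.clip(boxes[:, 0], left, right),
            np.clip(boxes[:, 1], top, bottom),
            np.clip(boxes[:, 2], left, right),
            np.clip(boxes[:, 3], top, bottom),
        ],
        axis=1,
    )
    # 3) A box whose coordinates changed was cut by the crop -> truncated.
    truncated = np.any(clipped != boxes, axis=1)
    return clipped, truncated

boxes = np.array([[10.0, 10.0, 50.0, 50.0], [200.0, 200.0, 300.0, 300.0]])
clipped, truncated = clip_boxes(boxes, left=0, top=0, right=100, bottom=100)
assert len(clipped) == 1 and not truncated[0]  # second box was fully outside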
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor that forms temporal batches."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nvidia_tao_tf1.core import processors
from nvidia_tao_tf1.blocks.multi_source_loader import types

# This is mostly copy-pasted code from SlidingWindowSequence in Modulus. The biggest
# change is that it uses the types.Session structure for session information.


class TemporalBatcher(processors.Processor):
    """Sliding window dataset with frames in the same sequence.

    Takes in a dataset, slides a window over it and returns the result if all elements
    in the current window come from the same sequence.

    NOTE: If the input dataset is not ordered by (1) session_uuid and (2) frame_number,
    the sliding window sequence will not work correctly, and might return an empty
    dataset as it could be unable to find a sequence where the session_uuids match.
    Furthermore, it does not assert for frame_number order or striding at all, so the
    sequence of frames (when the session_uuid and camera_name are unique) will be the
    frame-order that was presented to it.
    """

    def __init__(self, size=1, **kwargs):
        """
        Construct a temporal batcher.

        Args:
            size (int): Length of the sequence expressed in number of timesteps.
        """
        if size < 1:
            raise ValueError(
                "TemporalBatcher.size must be a positive number, not: {}".format(size)
            )
        self.size = size
        super(TemporalBatcher, self).__init__(**kwargs)

    def __repr__(self):
        """Return string representation of this processor."""
        return "TemporalBatcher(size={})".format(self.size)

    @staticmethod
    def predicate(example):
        """Predicate function that determines if the current input should be considered a sequence.

        Args:
            example (SequenceExample): The ``Example`` namedtuple containing the tensors.

        Returns:
            A tf.bool dependent on whether the current input samples are in sequence.
            It returns True only if the ``session_uuid`` values inside the input
            sequence are identical.
        """

        def _all_elements_identical(tensor):
            unique, _ = tf.unique(tensor)
            n_unique = tf.shape(input=unique)[0]
            return tf.equal(n_unique, 1)

        if types.FEATURE_SESSION not in example.instances:
            raise ValueError(
                "FEATURE_SESSION is required for temporal batching but is not present "
                "in example.instances."
            )

        # TODO(vkallioniemi): Add an assertion and/or make this more robust. The current
        # implementation relies on the sequence_extender script to ensure that datasets
        # are aligned at sequence boundaries.
        return _all_elements_identical(example.instances[types.FEATURE_SESSION].uuid)

    def call(self, dataset):
        """Process dataset by grouping consecutive frames into sequences.

        Args:
            dataset (tf.data.Dataset<SequenceExample>): Input dataset on which to perform
                temporal batching.

        Returns:
            (tf.data.Dataset<SequenceExample>): Examples from the input dataset batched
                temporally.
All tensors included in the example will gain an additional timestep dimension (e.g. a CHW image will become a TCHW, where T matches ``size``.) """ dataset = dataset.batch(self.size, drop_remainder=True) # We only need to filter if there was actually temporal batching. if self.size > 1: dataset = dataset.filter(predicate=self.predicate) return dataset
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/temporal_batcher.py
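The batch-then-filter behaviour is easy to picture on plain Python data: consecutive examples are grouped into non-overlapping windows of size, the remainder is dropped, and any window mixing session uuids is filtered out. A list-based sketch of those semantics (the real processor does this with tf.data batch/filter):

def temporal_batches(session_uuids, size):
    """Yield windows of `size` consecutive frames that share one session uuid."""
    for start in range(0, len(session_uuids) - size + 1, size):
        window = session_uuids[start:start + size]
        if len(set(window)) == 1:  # the predicate: all uuids identical
            yield window

# The mixed (a, b) window is filtered out, and the odd trailing frame is
# dropped, mirroring drop_remainder=True plus the uuid predicate.
uuids = ["a", "a", "a", "b", "b", "b", "b"]
assert list(temporal_batches(uuids, 2)) == [["a", "a"], ["b", "b"]]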
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for RandomGlimpse processor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from mock import patch from parameterized import parameterized import tensorflow as tf from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import ( ProcessorTestCase, ) from nvidia_tao_tf1.blocks.multi_source_loader.processors.random_glimpse import ( RandomGlimpse, ) from nvidia_tao_tf1.blocks.multi_source_loader.types import ( Example, FEATURE_CAMERA, LABEL_MAP, ) from nvidia_tao_tf1.core.coreobject import deserialize_tao_object def _esc(message): """Escape passed in string for regular expressions.""" return re.escape(message) class TestRandomGlimpseConstruction(ProcessorTestCase): @parameterized.expand([[1, 1], [1, 2], [2, 1]]) def test_valid_bounds_do_not_raise(self, height, width): RandomGlimpse( crop_location=RandomGlimpse.CENTER, crop_probability=0.5, height=height, width=width, ) def test_raises_on_invalid_crop_location(self): with self.assertRaisesRegexp( ValueError, _esc( "RandomGlimpse.crop_location 'under' is not \ supported. Valid options: center, random." ), ): RandomGlimpse(crop_location="under", height=7, width=7) @parameterized.expand([[RandomGlimpse.CENTER], [RandomGlimpse.RANDOM]]) def test_valid_crop_location_does_not_raise(self, crop_location): RandomGlimpse(crop_location=crop_location, height=12, width=24) @parameterized.expand([[0.0], [0.5], [1.0]]) def test_accepts_valid_probability(self, probability): RandomGlimpse( crop_location=RandomGlimpse.CENTER, crop_probability=probability, height=7, width=7, ) class TestRandomGlimpseProcessing(ProcessorTestCase): def test_scales_to_requested_height_and_width(self): example = self.make_example_128x240() expected_frames = tf.ones((1, 64, 120, 3)) expected_labels = self.make_polygon_label([[60, 0.0], [120, 64], [0.0, 64]]) with self.test_session(): random_glimpse = RandomGlimpse( crop_location=RandomGlimpse.CENTER, crop_probability=0.0, height=64, width=120, ) scaled = random_glimpse.process(example) self.assertAllClose( expected_frames.eval(), scaled.instances[FEATURE_CAMERA].eval() ) self.assert_labels_close(expected_labels, scaled.labels[LABEL_MAP]) def test_crops_center_crop(self): frames = tf.ones((1, 128, 240, 3)) labels = self.make_polygon_label( vertices=[[61, 33], [179, 33], [179, 95], [61, 95]] ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: labels} ) expected_frames = tf.ones((1, 64, 120, 3)) # Center crop from 128x240 to 64x120 moves Xs 60 to the left and Ys 32 towards the top. 
expected_labels = self.make_polygon_label( vertices=[[1.0, 1.0], [119.0, 1.0], [119.0, 63.0], [1.0, 63.0]] ) with self.test_session(): random_glimpse = RandomGlimpse( crop_location=RandomGlimpse.CENTER, crop_probability=1.0, height=64, width=120, ) cropped = random_glimpse.process(example) self.assertAllClose( expected_frames.eval(), cropped.instances[FEATURE_CAMERA].eval() ) self.assert_labels_close(expected_labels, cropped.labels[LABEL_MAP]) @patch("modulus.processors.augment.random_glimpse.tf.random.uniform") def test_crops_random_location(self, mocked_random_uniform): frames = tf.ones((1, 128, 240, 3)) labels = self.make_polygon_label( vertices=[[60, 32], [180, 32], [180, 96], [60, 96]] ) example = Example( instances={FEATURE_CAMERA: frames}, labels={LABEL_MAP: labels} ) # All calls to random_uniform will return 0.5 causing left_x and top_y equal to 0.5. # This causes random crop to happen always from (0.5, 0.5) to (64.5, 120.5) which is # achieved by shifting all points 0.5 pixels towards the left and top. Always returning # 0.5 causes crop to always be performed as 0.5 < 1.0. mocked_random_uniform.return_value = tf.constant(0.5, dtype=tf.float32) expected_frames = tf.ones((1, 64, 120, 3)) expected_labels = self.make_polygon_label( vertices=[[59.5, 31.5], [179.5, 31.5], [179.5, 95.5], [59.5, 95.5]] ) with self.test_session(): random_glimpse = RandomGlimpse( crop_location=RandomGlimpse.RANDOM, crop_probability=1.0, height=64, width=120, ) cropped = random_glimpse.process(example) self.assertAllClose( expected_frames.eval(), cropped.instances[FEATURE_CAMERA].eval() ) self.assert_labels_close(expected_labels, cropped.labels[LABEL_MAP]) def test_serialization_and_deserialization(self): """Test that it is a TAOObject that can be serialized and deserialized.""" random_glimpse = RandomGlimpse( crop_location=RandomGlimpse.RANDOM, crop_probability=1.0, height=64, width=120, ) random_glimpse_dict = random_glimpse.serialize() deserialized_random_glimpse = deserialize_tao_object(random_glimpse_dict) self.assertEqual( str(random_glimpse._transform), str(deserialized_random_glimpse._transform) )
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/random_glimpse_test.py
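test_crops_center_crop relies on simple offset arithmetic: center-cropping a 128x240 frame to 64x120 shifts x coordinates by (240 - 120) / 2 = 60 and y coordinates by (128 - 64) / 2 = 32, which is exactly how the vertex [61, 33] becomes [1.0, 1.0] above. A two-line sketch of that mapping:

def center_crop_offset(src_height, src_width, crop_height, crop_width):
    """Top-left corner of a centered crop window."""
    return (src_width - crop_width) // 2, (src_height - crop_height) // 2

dx, dy = center_crop_offset(128, 240, 64, 120)  # -> (60, 32)
assert (61 - dx, 33 - dy) == (1, 1)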
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor that scales images and labels."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from nvidia_tao_tf1.blocks.multi_source_loader.processors.transform_processor import (
    TransformProcessor,
)
from nvidia_tao_tf1.core.coreobject import save_args
from nvidia_tao_tf1.core.processors import Scale as _Scale


class Scale(TransformProcessor):
    """Processor for scaling images and labels."""

    @save_args
    def __init__(self, height, width):
        """Construct a Scale processor.

        Args:
            height (float): New height to which contents will be scaled up/down to.
            width (float): New width to which contents will be scaled up/down to.
        """
        super(Scale, self).__init__(_Scale(height=height, width=width))
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/scale.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor for applying 2D filtering on images."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from abc import abstractmethod, abstractproperty

import numpy as np
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.data_format import (
    CHANNELS_FIRST,
    CHANNELS_LAST,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor import (
    Processor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    Canvas2D,
    Example,
    FEATURE_CAMERA,
    FEATURE_SESSION,
    Images2D,
    SequenceExample,
    TransformedExample,
)
from nvidia_tao_tf1.core.coreobject import save_args


class Filter2DProcessor(Processor):
    """Processor that applies 2D filtering operations."""

    @save_args
    def __init__(self):
        """Construct processor that uses a filter operation."""
        super(Filter2DProcessor, self).__init__()

    @property
    def supported_formats(self):
        """Data formats supported by this processor.

        Returns:
            data_formats (list of 'DataFormat'): Input data formats that this processor
                supports.
        """
        return [CHANNELS_FIRST, CHANNELS_LAST]

    @abstractproperty
    def probability(self):
        """Probability to apply filters."""

    @abstractmethod
    def get_filters(self):
        """Return a list of filters."""
        raise NotImplementedError("Processors.get_filters not implemented.")

    def can_compose(self, other):
        """Can't compose in Filter2DProcessor."""
        return False

    def compose(self, other):
        """Does not support composing."""
        raise NotImplementedError("ComposableProcessor.compose not implemented")

    def process(self, example):
        """
        Process examples by applying filters in sequence.

        The filters are applied in a 2D convolution fashion.

        Args:
            example (Example): Examples to process in format specified by data_format.

        Returns:
            example (Example): Example with a 2D filter applied.
        """
        if isinstance(example, TransformedExample):
            example = example.example
        if not isinstance(example, Example) and not isinstance(
            example, SequenceExample
        ):
            raise TypeError("Tried to process input of type: {}".format(type(example)))
        if self.probability is None:
            example = self._apply_filters_to_example(self.get_filters(), example)
        else:
            should_filter = np.random.uniform(0, 1, 1)
            if should_filter < self.probability:
                example = self._apply_filters_to_example(self.get_filters(), example)
        return example

    def _apply_filters_to_example(self, processor_filters, example):
        """Return new Example by applying the processor_filters against the input example.

        Args:
            processor_filters (list): A list of filters.
            example (Example): Examples to process in format specified by data_format.

        Return:
            example (Example): Example with a 2D filter applied.
        """
        if isinstance(example, SequenceExample):
            instances = example.instances
            images = instances[FEATURE_CAMERA].images
            images_rank = len(images.get_shape().as_list())
            if images_rank == 5:
                images = tf.squeeze(images, 1)
        else:
            # Legacy LaneNet dataloader expects transformations to be applied
            # here. TODO(mlehr): remove this functionality once we delete
            # the old dataloader.
            instances = example.instances
            images = instances[FEATURE_CAMERA]
            images_rank = len(images.get_shape().as_list())

        # H, W, and C are read as values because we need them to set the shape of the
        # image before leaving the method.
        # We read N as a tensor because its value is not known.
        if self.data_format == CHANNELS_LAST:
            _, H, W, C = images.get_shape().as_list()
            N = tf.shape(input=images)[0]
        else:
            _, C, H, W = images.get_shape().as_list()
            N = tf.shape(input=images)[0]
        if self.data_format == CHANNELS_LAST:
            # images dim = [N, 160, 480, 3] -> images dim = [N, 3, 160, 480]
            images = tf.transpose(a=images, perm=[0, 3, 1, 2])
        # images dim = [N, 3, 160, 480] -> images dim = [N*3, 160, 480, 1]
        images = tf.reshape(images, shape=[N * C, H, W, 1])
        images = self._apply_conv_kernels(processor_filters, images)
        # images dim = [N*3, 160, 480, 1] -> images dim = [N, 3, 160, 480]
        images = tf.reshape(images, shape=[N, C, H, W])
        if self.data_format == CHANNELS_LAST:
            # images dim = [N, 3, 160, 480] -> images dim = [N, 160, 480, 3]
            images = tf.transpose(a=images, perm=[0, 2, 3, 1])

        # TODO(mlehr): Make it more general. The instances dictionary could have other
        # keys too.
        if isinstance(example, SequenceExample):
            canvas_height = example.instances[FEATURE_CAMERA].canvas_shape.height
            canvas_width = example.instances[FEATURE_CAMERA].canvas_shape.width
            canvas_shape = Canvas2D(height=canvas_height, width=canvas_width)
            instances = {
                FEATURE_CAMERA: Images2D(
                    images=tf.expand_dims(images, axis=1)
                    if images_rank == 5
                    else images,
                    canvas_shape=canvas_shape,
                ),
                FEATURE_SESSION: instances[FEATURE_SESSION],
            }
            return SequenceExample(instances=instances, labels=example.labels)
        instances[FEATURE_CAMERA] = images
        return Example(instances=instances, labels=example.labels)

    def _apply_conv_kernels(self, processor_filters, images):
        """Return filtered images.

        Args:
            processor_filters (list): A list of filters.
            images (4DTensor): Image tensor with the shape of NHWC.

        Return:
            images (4DTensor): Image tensor with the shape of NHWC.
        """
        combined_padding_height = sum(
            tf.shape(input=f)[0] - 1 for f in processor_filters
        )
        combined_padding_width = sum(
            tf.shape(input=f)[1] - 1 for f in processor_filters
        )
        # Pad images before filtering.
        pad_top = combined_padding_height // 2
        pad_bottom = combined_padding_height - pad_top
        pad_left = combined_padding_width // 2
        pad_right = combined_padding_width - pad_left
        # images dim = [N*3, 160, 480, 1] -> images dim = [N*3, 164, 484, 1].
        images = tf.pad(
            tensor=images,
            paddings=[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
            mode="SYMMETRIC",
        )
        for one_processor_filter in processor_filters:
            # [kernel_height, kernel_width, 1]
            one_processor_filter = tf.expand_dims(one_processor_filter, axis=-1)
            # [kernel_height, kernel_width, 1, 1]
            one_processor_filter = tf.expand_dims(one_processor_filter, axis=-1)
            # images dim = [N*3, 164, 484, 1] -> images dim = [N*3, 160, 484, 1]
            # images dim = [N*3, 160, 484, 1] -> images dim = [N*3, 160, 480, 1]
            # Always NHWC here b/c conv2d CPU supports this format.
            images = tf.nn.conv2d(
                input=images,
                filters=one_processor_filter,
                strides=[1, 1, 1, 1],
                padding="VALID",
                data_format="NHWC",
            )
        return images
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/filter2d_processor.py
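A subclass of Filter2DProcessor only has to provide `probability` and `get_filters`. The sketch below is illustrative (the `BoxBlurFilter2DProcessor` name and its kernels are assumptions, not part of the library) and follows the same pattern as the `ConstantFilter2DProcessor` fixture in the test file that follows. Because the images are reshaped to [N*C, H, W, 1] before the convolutions, each filter is applied to every channel independently.

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.processors.filter2d_processor import (
    Filter2DProcessor,
)


class BoxBlurFilter2DProcessor(Filter2DProcessor):
    """Hypothetical processor applying a separable 3x3 box blur to ~50% of inputs."""

    @property
    def probability(self):
        return 0.5

    def get_filters(self):
        # A 3x3 box blur expressed as a vertical [3, 1] and a horizontal [1, 3]
        # pass; each kernel sums to 1, so overall image brightness is preserved.
        return [
            tf.constant(1.0 / 3.0, shape=[3, 1]),
            tf.constant(1.0 / 3.0, shape=[1, 3]),
        ]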
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Filter2DProcessor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from mock import Mock
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.data_format import (
    CHANNELS_FIRST,
    CHANNELS_LAST,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.filter2d_processor import (
    Filter2DProcessor,
)
from nvidia_tao_tf1.blocks.multi_source_loader.processors.processor_test_case import (
    ProcessorTestCase,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import FEATURE_CAMERA
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object


class ConstantFilter2DProcessor(Filter2DProcessor):
    """Concrete processor that always applies two constant 1x1 filters."""

    @property
    def probability(self):
        return 1.0

    def get_filters(self):
        return [
            tf.reshape(tf.constant([0.5]), shape=[1, 1]),
            tf.reshape(tf.constant([0.6]), shape=[1, 1]),
        ]


class TestFilterProcessor(ProcessorTestCase):
    def test_supports_channels_first_and_channels_last(self):
        processor = ConstantFilter2DProcessor()
        assert processor.supported_formats == [CHANNELS_FIRST, CHANNELS_LAST]

    def test_process_fails_on_none(self):
        with self.assertRaises(TypeError):
            ConstantFilter2DProcessor().process(None)

    def test_process_fails_when_input_is_not_an_example(self):
        with self.assertRaises(TypeError):
            ConstantFilter2DProcessor().process(Mock())

    def test_uses_provided_filter(self):
        processor = ConstantFilter2DProcessor()
        processor.data_format = CHANNELS_LAST
        with self.test_session() as session:
            example = self.make_example_128x240()
            np_example = session.run(example.instances[FEATURE_CAMERA])
            processed = processor.process(example)
            np_processed = session.run(processed.instances[FEATURE_CAMERA])
            # The two 1x1 filters multiply each pixel by 0.5 * 0.6 = 0.3.
            self.assertAllClose(np_processed, 0.3 * np_example)

    def test_serialization_and_deserialization(self):
        """Test that it is a TAOObject that can be serialized and deserialized."""
        processor = ConstantFilter2DProcessor()
        processor_dict = processor.serialize()
        deserialized_processor_dict = deserialize_tao_object(processor_dict)
        assert processor.data_format == deserialized_processor_dict.data_format
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/processors/filter2d_processor_test.py
"""Module containing custom ops for the multi-source dataloader."""
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/lib/__init__.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TFRecordsDataSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pytest
import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.sources.tfrecords_data_source import (
    TFRecordsDataSource,
)
from nvidia_tao_tf1.blocks.multi_source_loader.types import (
    FEATURE_CAMERA,
    Images2DReference,
    LABEL_MAP,
    Polygon2DLabel,
)
from nvidia_tao_tf1.core.coreobject import deserialize_tao_object


class TFRecordsDataSourceTest(tf.test.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.image_dir = (
            "moduluspy/modulus/blocks/data_loaders/"
            "multi_source_loader/sources/testdata/"
        )
        cls.tfrecords_path = (
            "moduluspy/modulus/blocks/data_loaders/"
            "multi_source_loader/sources/testdata/test_1.tfrecords"
        )
        cls.tfrecords_path2 = (
            "moduluspy/modulus/blocks/data_loaders/"
            "multi_source_loader/sources/testdata/test_2.tfrecords"
        )

    def test_raises_when_image_dir_does_not_exist(self):
        with pytest.raises(ValueError) as exception:
            TFRecordsDataSource(
                tfrecord_path=self.tfrecords_path,
                image_dir="/no/such/image_dir",
                extension=".jpg",
                height=1208,
                width=1920,
                channels=3,
                subset_size=0,
            )
        assert "/no/such/image_dir" in str(exception)

    def test_raises_when_tfrecords_file_does_not_exist(self):
        with pytest.raises(ValueError) as exception:
            TFRecordsDataSource(
                tfrecord_path="/no/such/tfrecords/file",
                image_dir=self.image_dir,
                extension=".jpg",
                height=1208,
                width=1920,
                channels=3,
                subset_size=0,
            )
        assert "/no/such/tfrecords/file" in str(exception)

    def test_length(self):
        source = TFRecordsDataSource(
            tfrecord_path=self.tfrecords_path,
            image_dir=self.image_dir,
            extension=".jpg",
            height=604,
            width=960,
            channels=3,
            subset_size=0,
        )
        assert 3 == len(source)

    def test_length_with_list_of_tfrecords(self):
        source = TFRecordsDataSource(
            tfrecord_path=[self.tfrecords_path, self.tfrecords_path2],
            image_dir=self.image_dir,
            extension=".jpg",
            height=604,
            width=960,
            channels=3,
            subset_size=0,
        )
        assert 6 == len(source)

    def test_end_to_end(self):
        source = TFRecordsDataSource(
            tfrecord_path=self.tfrecords_path,
            image_dir=self.image_dir,
            extension=".jpg",
            height=1208,
            width=1920,
            channels=3,
            subset_size=0,
        )
        dataset = source()
        dataset = dataset.apply(source.parse_example)
        iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
        get_next = iterator.get_next()
        with self.test_session() as sess:
            read_examples = 0
            while True:
                try:
                    example = sess.run(get_next)
                    read_examples += 1

                    image_references = example.instances[FEATURE_CAMERA]
                    assert Images2DReference == type(image_references)
                    assert (1208,) == image_references.canvas_shape.height.shape
                    assert (1920,) == image_references.canvas_shape.width.shape

                    label = example.labels[LABEL_MAP]
                    assert isinstance(label, Polygon2DLabel)
                    assert (3,) == label.vertices.coordinates.dense_shape.shape
                except tf.errors.OutOfRangeError:
                    break
            assert len(source) == read_examples

    def test_zero_sampling_ratio_defaults_to_one(self):
        source = TFRecordsDataSource(
            tfrecord_path=self.tfrecords_path,
            image_dir=self.image_dir,
            extension=".jpg",
            height=1208,
            width=1920,
            channels=3,
            subset_size=0,
        )
        assert source.sample_ratio == 1.0

    def test_sampling_ratio_is_set(self):
        sample_ratio = 0.2
        source = TFRecordsDataSource(
            tfrecord_path=self.tfrecords_path,
            image_dir=self.image_dir,
            extension=".jpg",
            height=1208,
            width=1920,
            channels=3,
            subset_size=0,
            sample_ratio=sample_ratio,
        )
        assert source.sample_ratio == sample_ratio

    def test_serialization_and_deserialization(self):
        sample_ratio = 0.2
        source = TFRecordsDataSource(
            tfrecord_path=self.tfrecords_path,
            image_dir=self.image_dir,
            extension=".jpg",
            height=1208,
            width=1920,
            channels=3,
            subset_size=0,
            sample_ratio=sample_ratio,
        )
        source_dict = source.serialize()
        deserialized_source = deserialize_tao_object(source_dict)
        assert source.tfrecord_path == deserialized_source.tfrecord_path
        assert source.image_dir == deserialized_source.image_dir
        assert source.extension == deserialized_source.extension
        assert source.height == deserialized_source.height
        assert source.width == deserialized_source.width
        assert source.channels == deserialized_source.channels
        assert source.subset_size == deserialized_source.subset_size
        assert source.sample_ratio == deserialized_source.sample_ratio
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/sources/tfrecords_data_source_test.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DataSource interface for accessing datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from abc import abstractmethod
import io
import sys

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader import processors
from nvidia_tao_tf1.core.coreobject import AbstractTAOObject


class DataSource(AbstractTAOObject):
    """
    Interface for adding new types of datasets.

    Datasets are stored in different on-disk formats (e.g. tfrecords, sqlite). The
    DataSource interface is meant to normalize/standardize datasets to output Example
    namedtuples so that the rest of the training pipeline does not need to know about
    the on-disk differences.

    The interface implementor is expected to provide the properties below. They are
    accessed in this order to ensure memory efficient loading of the data:

    1. The dataset property is used to load metadata in a source specific format. The
       DataLoader makes no assumptions about the structure of the data yielded by this
       dataset.
    2. dataset.apply is called on the processor returned by the parse_example property.
       The DataLoader assumes that the processor returns a dataset composed of
       individual examples that can still be in a source specific format
       (e.g. tfrecord.)

    * Example is currently the namedtuple found in types.py, but will be transitioned
      to a more flexible/generic format documented in this design doc:
      https://docs.google.com/document/d/1qXBUvRt-umAfkHB3KOiUDDXzHD986HRhdtGAZ29vQpQ

    TODO(vkallioniemi): Update ^^ docs when the new Example format is adopted.
    """

    def __init__(self, preprocessing=None, sample_ratio=1.0, extension=None):
        """
        Construct a data source.

        Args:
            preprocessing (Pipeline or list[Processor]): Optional preprocessing
                processors specific to this dataset. Defaults to no preprocessing.
            sample_ratio (float): Optional frequency at which a sample from this data
                source is picked for inclusion in a batch. Defaults to 1.0.
            extension (str): Extension of the data files. E.g., '.fp16'.
        """
        super(DataSource, self).__init__()
        if preprocessing is None:
            preprocessing = processors.Pipeline([])
        if sample_ratio < 0:
            raise ValueError("Sample ratio {} cannot be < 0.".format(sample_ratio))

        self.preprocessing = preprocessing
        self.sample_ratio = sample_ratio
        self.extension = extension

    @abstractmethod
    def call(self):
        """Build a dataset.

        Returns:
            (tf.data.Dataset): Dataset that produces source specific pieces of data.
        """
        raise NotImplementedError("DataSource.call not implemented.")

    def __call__(self):
        """Build a dataset.

        Returns:
            (tf.data.Dataset): Dataset that produces source specific pieces of data.
        """
        return self.call()

    @abstractmethod
    def __len__(self):
        """Return the number of examples in this dataset."""
        raise NotImplementedError("DataSource.__len__ not implemented.")

    def __str__(self):
        """Return a string representation of this data source."""
        if sys.version_info >= (3, 0):
            out = io.StringIO()
        else:
            out = io.BytesIO()
        self.summary(print_fn=lambda string: print(string, file=out))
        return out.getvalue()

    def summary(self, print_fn=None):
        """
        Print a summary of the contents of this data source.

        Args:
            print_fn (function): Optional function used to print each summary line.
                Defaults to print.
        """
        if print_fn is None:
            print_fn = print
        print_fn(" - samples: {}".format(len(self)))
        print_fn(" - sample ratio: {}".format(self.sample_ratio))
        print_fn(" - extension: {}".format(self.extension))
        if self.preprocessing:
            print_fn(" - preprocessing:")
            for processor in self.preprocessing:
                print_fn("   - {}".format(processor))

    @property
    def parse_example(self):
        """
        Return a processor/function that can be applied to a dataset to parse it.

        The function returned must have this signature:
        `def parser(dataset: tf.data.Dataset[R]) -> tf.data.Dataset[T]`, where types R
        and T are DataSource implementation specific.

        After this processor is applied, the dataset can still be in a source specific
        format, but each item yielded by the dataset is expected to be an individual
        example.
        """
        # Deprecated and will be removed. DataSource.dataset will be expected to return
        # parsed examples.
        return None

    def supports_sharding(self):
        """Whether this source can do its own sharding."""
        return False

    def set_shard(self, num_shards, shard_id, pseudo_sharding=False):
        """
        Set the sharding configuration of this source.

        Args:
            num_shards (int): The number of shards.
            shard_id (int): Shard id from 0 to num_shards - 1.
            pseudo_sharding (bool): If True, data is not actually sharded, but
                different shuffle seeds are used to differentiate shard batches.
        """
        raise NotImplementedError()

    def supports_shuffling(self):
        """Whether this source can do its own shuffling."""
        return False

    def set_shuffle(self, buffer_size):
        """Enable shuffling on this data source."""
        raise NotImplementedError()

    def set_sequence_length(self, sequence_length):
        """Set the sequence length (number of frames in a sequence)."""
        pass

    def supports_temporal_batching(self):
        """Whether this source does its own temporal batching."""
        return False

    def initialize(self):
        """Called by data loaders after all configuration is done."""
        pass

    def get_image_properties(self):
        """Return the maximum image width and height for this data source."""
        return 0, 0

    def set_image_properties(self, max_image_width, max_image_height):
        """Override the max image width and height of this data source for padding purposes."""
        pass

    @property
    def image_extension(self):
        """Return the image file extension."""
        return self.extension

    @property
    def image_dtype(self):
        """Return the dtype of images for this data source.

        Return:
            (tf.dtypes.DType): dtype of images for this data source, inferred from
                the file extension.
        """
        if self.image_extension in [".jpeg", ".jpg", ".png"]:
            return tf.uint8
        if self.image_extension == ".fp16":
            return tf.float16
        return tf.float32
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/sources/data_source.py
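To illustrate the contract, here is a minimal sketch of a concrete source. The `InMemoryDataSource` name and its in-memory tensors are assumptions for illustration, not part of the library; only `call` and `__len__` are abstract, so a bare-bones source needs just those two members. A real source would typically also handle serialization via @save_args.

import tensorflow as tf

from nvidia_tao_tf1.blocks.multi_source_loader.sources.data_source import (
    DataSource,
)


class InMemoryDataSource(DataSource):
    """Hypothetical source that serves a fixed list of in-memory images."""

    def __init__(self, images, **kwargs):
        super(InMemoryDataSource, self).__init__(**kwargs)
        self._images = images

    def call(self):
        # Produce one source specific item per example; here the item is the
        # image tensor itself, so no parse_example processor is needed.
        return tf.data.Dataset.from_tensor_slices(self._images)

    def __len__(self):
        return len(self._images)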
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oversampling strategy using ratios between rare and dominant classes.

This strategy is meant to mimic the one used during TFRecords generation for DriveNet
via the dlav.drivenet.dataio.sample_modifier.

The concepts to take note of here:

* source class: This typically corresponds to the 'classifier' feature found in SQLite
  HumanLoop exports. In the particular use case of object detection, typical values
  include 'automobile', 'cvip', 'heavy truck', ...
* target class: This corresponds to what one may wish to map a source class to. E.g.
  one may map 'automobile' and 'cvip' to the same target class 'car', or 'rider' and
  'person' to 'person', etc.
* dominant target classes / rare classes: The latter is taken implicitly as whatever is
  _not_ a dominant target class. Dominant target classes have in practice been the more
  represented classes, such as 'car' or 'road sign', the implication being that
  'bicycle' or 'person' were the rare target classes.

In a given frame, if #(any rare target class) > some factor * #(any dominant target
class), then that frame is duplicated a specified number of times. 'some factor'
corresponds to the entry minimum_target_class_imbalance[rare_target_class].
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import Counter

from nvidia_tao_tf1.core.dataloader.dataset import OverSamplingStrategy
from nvidia_tao_tf1.core.coreobject import save_args


class ImbalanceOverSamplingStrategy(OverSamplingStrategy):
    """Oversampling strategy using ratios between rare and dominant classes."""

    @save_args
    def __init__(
        self,
        dominant_target_classes,
        minimum_target_class_imbalance,
        num_duplicates,
        source_to_target_class_mapping,
    ):
        """Constructor.

        Args:
            dominant_target_classes (list): List of strings indicating the dominant
                target classes: target classes to be considered as dominant when
                determining whether to duplicate a sample.
            minimum_target_class_imbalance (dict): Target class - float pairs
                indicating the minimum imbalance determining when to duplicate.
                Basically, if the class imbalance within the frame is larger than this,
                duplicate. E.g. if #bicycles / #dominant class objects >
                minimum_target_class_imbalance[bicycle], duplicate. The default value
                for a class is 1.0 if not given.
            num_duplicates (int): Number of duplicate samples to be added when the
                duplication condition above is fulfilled. If a sample is to be
                duplicated, it will appear (num_duplicates + 1) times in total.
            source_to_target_class_mapping (dict): Mapping from label/source classes to
                target classes.
        """
        self._dominant_target_classes = dominant_target_classes
        self._minimum_target_class_imbalance = minimum_target_class_imbalance
        self._num_duplicates = num_duplicates
        self._source_to_target_class_mapping = source_to_target_class_mapping

    def oversample(self, frame_groups, count_lookup):
        """Determine which frames to oversample.

        Args:
            frame_groups (list): List of lists of tuples. The outer list is over frame
                groups; each inner list contains tuples of
                (frame id (int), unlabeled (bool)).
            count_lookup (dict): Maps from frame ID (int) to another dict, that maps
                from classifier (str) to a dict holding the occurrence count (int)
                under the "COUNT" key.

        Returns:
            repeated_groups (list): Follows the same structure as `frame_groups`. It
                contains the frames that are to be repeated.
        """
        repeated_groups = []
        for frame_group in frame_groups:
            num_duplicates = 1
            for frame_id, _ in frame_group:
                # Tally occurrences per target class for this frame.
                class_counts = Counter()
                for classifier, classifier_lookup in count_lookup[frame_id].items():
                    count = classifier_lookup["COUNT"]
                    if classifier in self._source_to_target_class_mapping:
                        class_counts[
                            self._source_to_target_class_mapping[classifier]
                        ] += count
                rare_target_classes = [
                    class_id
                    for class_id in class_counts
                    if class_id not in self._dominant_target_classes
                ]
                # The imbalance threshold defaults to 1.0 for classes without an
                # explicit entry (see the constructor docstring).
                repeat = any(
                    class_counts[rare_target_class]
                    > class_counts[dominant_target_class]
                    * self._minimum_target_class_imbalance.get(rare_target_class, 1.0)
                    for rare_target_class in rare_target_classes
                    for dominant_target_class in self._dominant_target_classes
                )

                if repeat:
                    num_duplicates += self._num_duplicates
            for _ in range(num_duplicates):
                repeated_groups.append(frame_group)

        return repeated_groups
tao_tensorflow1_backend-main
nvidia_tao_tf1/blocks/multi_source_loader/sources/imbalance_oversampling.py
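To make the duplication rule concrete, here is a small worked example; the class names, counts, and thresholds below are illustrative, not taken from any real dataset.

from nvidia_tao_tf1.blocks.multi_source_loader.sources.imbalance_oversampling import (
    ImbalanceOverSamplingStrategy,
)

strategy = ImbalanceOverSamplingStrategy(
    dominant_target_classes=["car"],
    minimum_target_class_imbalance={"person": 0.5},
    num_duplicates=2,
    source_to_target_class_mapping={
        "automobile": "car",
        "heavy truck": "car",
        "rider": "person",
    },
)

# Frame 0: 2 riders vs 2 automobiles -> 2 > 0.5 * 2, so the group is
# duplicated and appears 1 + num_duplicates = 3 times.
# Frame 1: 1 rider vs 4 automobiles -> 1 <= 0.5 * 4, so it appears once.
frame_groups = [[(0, False)], [(1, False)]]
count_lookup = {
    0: {"rider": {"COUNT": 2}, "automobile": {"COUNT": 2}},
    1: {"rider": {"COUNT": 1}, "automobile": {"COUNT": 4}},
}
repeated = strategy.oversample(frame_groups, count_lookup)
assert repeated == [[(0, False)]] * 3 + [[(1, False)]]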