# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.core.batcher."""

import numpy as np
import tensorflow as tf

from object_detection.core import batcher

slim = tf.contrib.slim


class BatcherTest(tf.test.TestCase):

  def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self):
    with self.test_session() as sess:
      batch_size = 3
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      boxes = tf.tile(
          tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'boxes': boxes},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, 4], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
            i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch)

  def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions(
      self):
    with self.test_session() as sess:
      batch_size = 3
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      image = tf.reshape(
          tf.range(counter * counter), tf.stack([counter, counter]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'image': image},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, None], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
            i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch)

  def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self):
    with self.test_session() as sess:
      batch_size = 3
      num_batches = 2
      examples = tf.Variable(tf.constant(1, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 1)
      image = tf.reshape(tf.range(1, 13), [4, 3]) * counter
      batch_queue = batcher.BatchQueue(
          tensor_dict={'image': image},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([4, 3], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 1
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i)
            i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch)

  def test_batcher_when_batch_size_is_one(self):
    with self.test_session() as sess:
      batch_size = 1
      num_batches = 2
      examples = tf.Variable(tf.constant(2, dtype=tf.int32))
      counter = examples.count_up_to(num_batches * batch_size + 2)
      image = tf.reshape(
          tf.range(counter * counter), tf.stack([counter, counter]))
      batch_queue = batcher.BatchQueue(
          tensor_dict={'image': image},
          batch_size=batch_size,
          batch_queue_capacity=100,
          num_batch_queue_threads=1,
          prefetch_queue_capacity=100)
      batch = batch_queue.dequeue()

      for tensor_dict in batch:
        for tensor in tensor_dict.values():
          self.assertAllEqual([None, None], tensor.get_shape().as_list())

      tf.initialize_all_variables().run()
      with slim.queues.QueueRunners(sess):
        i = 2
        for _ in range(num_batches):
          batch_np = sess.run(batch)
          for tensor_dict in batch_np:
            for tensor in tensor_dict.values():
              self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
            i += 1
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run(batch)


if __name__ == '__main__':
  tf.test.main()
# ------------------------------------------------------------------------------
# repo_name: DeepLearningExamples-master
# file_path: TensorFlow/Detection/SSD/models/research/object_detection/core/batcher_test.py
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# repo_name: DeepLearningExamples-master
# file_path: TensorFlow/Detection/SSD/models/research/object_detection/core/__init__.py
# (python_code for this file is empty)
# ------------------------------------------------------------------------------
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Contains classes specifying naming conventions used for object detection.

Specifies:
  InputDataFields: standard fields used by reader/preprocessor/batcher.
  DetectionResultFields: standard fields returned by object detector.
  BoxListFields: standard fields used by BoxList.
  TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""


class InputDataFields(object):
  """Names for the input tensors.

  Holds the standard data field names to use for identifying input tensors.
  This should be used by the decoder to identify keys for the returned
  tensor_dict containing input tensors. And it should be used by the model to
  identify the tensors it needs.

  Attributes:
    image: image.
    image_additional_channels: additional channels.
    original_image: image in the original input size.
    original_image_spatial_shape: spatial shape of the original image.
    key: unique key corresponding to image.
    source_id: source of the original image.
    filename: original filename of the dataset (without common path).
    groundtruth_image_classes: image-level class labels.
    groundtruth_image_confidences: image-level class confidences.
    groundtruth_boxes: coordinates of the ground truth boxes in the image.
    groundtruth_classes: box-level class labels.
    groundtruth_confidences: box-level class confidences. The shape should be
      the same as the shape of groundtruth_classes.
    groundtruth_label_types: box-level label types (e.g. explicit negative).
    groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
      is the groundtruth a single object or a crowd.
    groundtruth_area: area of a groundtruth segment.
    groundtruth_difficult: is a `difficult` object.
    groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
      same class, forming a connected group, where instances are heavily
      occluding each other.
    proposal_boxes: coordinates of object proposal boxes.
    proposal_objectness: objectness score of each proposal.
    groundtruth_instance_masks: ground truth instance masks.
    groundtruth_instance_boundaries: ground truth instance boundaries.
    groundtruth_instance_classes: instance mask-level class labels.
    groundtruth_keypoints: ground truth keypoints.
    groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
    groundtruth_label_weights: groundtruth label weights.
    groundtruth_weights: groundtruth weight factor for bounding boxes.
    num_groundtruth_boxes: number of groundtruth boxes.
    is_annotated: whether an image has been labeled or not.
    true_image_shape: true shapes of images in the resized images, as resized
      images can be padded with zeros.
    multiclass_scores: the label score per class for each box.
""" image = 'image' image_additional_channels = 'image_additional_channels' original_image = 'original_image' original_image_spatial_shape = 'original_image_spatial_shape' key = 'key' source_id = 'source_id' filename = 'filename' groundtruth_image_classes = 'groundtruth_image_classes' groundtruth_image_confidences = 'groundtruth_image_confidences' groundtruth_boxes = 'groundtruth_boxes' groundtruth_classes = 'groundtruth_classes' groundtruth_confidences = 'groundtruth_confidences' groundtruth_label_types = 'groundtruth_label_types' groundtruth_is_crowd = 'groundtruth_is_crowd' groundtruth_area = 'groundtruth_area' groundtruth_difficult = 'groundtruth_difficult' groundtruth_group_of = 'groundtruth_group_of' proposal_boxes = 'proposal_boxes' proposal_objectness = 'proposal_objectness' groundtruth_instance_masks = 'groundtruth_instance_masks' groundtruth_instance_boundaries = 'groundtruth_instance_boundaries' groundtruth_instance_classes = 'groundtruth_instance_classes' groundtruth_keypoints = 'groundtruth_keypoints' groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities' groundtruth_label_weights = 'groundtruth_label_weights' groundtruth_weights = 'groundtruth_weights' num_groundtruth_boxes = 'num_groundtruth_boxes' is_annotated = 'is_annotated' true_image_shape = 'true_image_shape' multiclass_scores = 'multiclass_scores' class DetectionResultFields(object): """Naming conventions for storing the output of the detector. Attributes: source_id: source of the original image. key: unique key corresponding to image. detection_boxes: coordinates of the detection boxes in the image. detection_scores: detection scores for the detection boxes in the image. detection_classes: detection-level class labels. detection_masks: contains a segmentation mask for each detection box. detection_boundaries: contains an object boundary for each detection box. detection_keypoints: contains detection keypoints for each detection box. num_detections: number of detections in the batch. """ source_id = 'source_id' key = 'key' detection_boxes = 'detection_boxes' detection_scores = 'detection_scores' detection_classes = 'detection_classes' detection_masks = 'detection_masks' detection_boundaries = 'detection_boundaries' detection_keypoints = 'detection_keypoints' num_detections = 'num_detections' class BoxListFields(object): """Naming conventions for BoxLists. Attributes: boxes: bounding box coordinates. classes: classes per bounding box. scores: scores per bounding box. weights: sample weights per bounding box. objectness: objectness score per bounding box. masks: masks per bounding box. boundaries: boundaries per bounding box. keypoints: keypoints per bounding box. keypoint_heatmaps: keypoint heatmaps per bounding box. is_crowd: is_crowd annotation per bounding box. """ boxes = 'boxes' classes = 'classes' scores = 'scores' weights = 'weights' confidences = 'confidences' objectness = 'objectness' masks = 'masks' boundaries = 'boundaries' keypoints = 'keypoints' keypoint_heatmaps = 'keypoint_heatmaps' is_crowd = 'is_crowd' class TfExampleFields(object): """TF-example proto feature names for object detection. Holds the standard feature names to load from an Example proto for object detection. Attributes: image_encoded: JPEG encoded string image_format: image format, e.g. "JPEG" filename: filename channels: number of channels of image colorspace: colorspace, e.g. "RGB" height: height of image in pixels, e.g. 462 width: width of image in pixels, e.g. 
581 source_id: original source of the image image_class_text: image-level label in text format image_class_label: image-level label in numerical format object_class_text: labels in text format, e.g. ["person", "cat"] object_class_label: labels in numbers, e.g. [16, 8] object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30 object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40 object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50 object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70 object_view: viewpoint of object, e.g. ["frontal", "left"] object_truncated: is object truncated, e.g. [true, false] object_occluded: is object occluded, e.g. [true, false] object_difficult: is object difficult, e.g. [true, false] object_group_of: is object a single object or a group of objects object_depiction: is object a depiction object_is_crowd: [DEPRECATED, use object_group_of instead] is the object a single object or a crowd object_segment_area: the area of the segment. object_weight: a weight factor for the object's bounding box. instance_masks: instance segmentation masks. instance_boundaries: instance boundaries. instance_classes: Classes for each instance segmentation mask. detection_class_label: class label in numbers. detection_bbox_ymin: ymin coordinates of a detection box. detection_bbox_xmin: xmin coordinates of a detection box. detection_bbox_ymax: ymax coordinates of a detection box. detection_bbox_xmax: xmax coordinates of a detection box. detection_score: detection score for the class label and box. """ image_encoded = 'image/encoded' image_format = 'image/format' # format is reserved keyword filename = 'image/filename' channels = 'image/channels' colorspace = 'image/colorspace' height = 'image/height' width = 'image/width' source_id = 'image/source_id' image_class_text = 'image/class/text' image_class_label = 'image/class/label' object_class_text = 'image/object/class/text' object_class_label = 'image/object/class/label' object_bbox_ymin = 'image/object/bbox/ymin' object_bbox_xmin = 'image/object/bbox/xmin' object_bbox_ymax = 'image/object/bbox/ymax' object_bbox_xmax = 'image/object/bbox/xmax' object_view = 'image/object/view' object_truncated = 'image/object/truncated' object_occluded = 'image/object/occluded' object_difficult = 'image/object/difficult' object_group_of = 'image/object/group_of' object_depiction = 'image/object/depiction' object_is_crowd = 'image/object/is_crowd' object_segment_area = 'image/object/segment/area' object_weight = 'image/object/weight' instance_masks = 'image/segmentation/object' instance_boundaries = 'image/boundaries/object' instance_classes = 'image/segmentation/object/class' detection_class_label = 'image/detection/label' detection_bbox_ymin = 'image/detection/bbox/ymin' detection_bbox_xmin = 'image/detection/bbox/xmin' detection_bbox_ymax = 'image/detection/bbox/ymax' detection_bbox_xmax = 'image/detection/bbox/xmax' detection_score = 'image/detection/score'
# ------------------------------------------------------------------------------
# repo_name: DeepLearningExamples-master
# file_path: TensorFlow/Detection/SSD/models/research/object_detection/core/standard_fields.py
# ------------------------------------------------------------------------------
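# Illustrative usage sketch (not from the repo above): the field classes are
# plain namespaces of string constants, shared as dictionary keys between the
# decoder, preprocessor, and model. The tensor values here are made-up
# placeholders; only the key names come from standard_fields.py. TF 1.x assumed.
import tensorflow as tf

from object_detection.core import standard_fields as fields

tensor_dict = {
    fields.InputDataFields.image:
        tf.zeros([300, 300, 3], dtype=tf.float32),
    fields.InputDataFields.groundtruth_boxes:
        tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32),
    fields.InputDataFields.groundtruth_classes:
        tf.constant([1], dtype=tf.int64),
}
# Consumers look tensors up by the shared constant, never by a literal string.
boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]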
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.core.box_list."""

import tensorflow as tf

from object_detection.core import box_list


class BoxListTest(tf.test.TestCase):
  """Tests for BoxList class."""

  def test_num_boxes(self):
    data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32)
    expected_num_boxes = 3

    boxes = box_list.BoxList(data)
    with self.test_session() as sess:
      num_boxes_output = sess.run(boxes.num_boxes())
      self.assertEquals(num_boxes_output, expected_num_boxes)

  def test_get_correct_center_coordinates_and_sizes(self):
    boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
    boxes = box_list.BoxList(tf.constant(boxes))
    centers_sizes = boxes.get_center_coordinates_and_sizes()
    expected_centers_sizes = [[15, 0.35], [12.5, 0.25], [10, 0.3], [5, 0.3]]
    with self.test_session() as sess:
      centers_sizes_out = sess.run(centers_sizes)
      self.assertAllClose(centers_sizes_out, expected_centers_sizes)

  def test_create_box_list_with_dynamic_shape(self):
    data = tf.constant([[0, 0, 1, 1], [1, 1, 2, 3], [3, 4, 5, 5]], tf.float32)
    indices = tf.reshape(tf.where(tf.greater([1, 0, 1], 0)), [-1])
    data = tf.gather(data, indices)
    assert data.get_shape().as_list() == [None, 4]
    expected_num_boxes = 2

    boxes = box_list.BoxList(data)
    with self.test_session() as sess:
      num_boxes_output = sess.run(boxes.num_boxes())
      self.assertEquals(num_boxes_output, expected_num_boxes)

  def test_transpose_coordinates(self):
    boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
    boxes = box_list.BoxList(tf.constant(boxes))
    boxes.transpose_coordinates()
    expected_corners = [[10.0, 10.0, 15.0, 20.0], [0.1, 0.2, 0.4, 0.5]]
    with self.test_session() as sess:
      corners_out = sess.run(boxes.get())
      self.assertAllClose(corners_out, expected_corners)

  def test_box_list_invalid_inputs(self):
    data0 = tf.constant([[[0, 0, 1, 1], [3, 4, 5, 5]]], tf.float32)
    data1 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.float32)
    data2 = tf.constant([[0, 0, 1], [1, 1, 2], [3, 4, 5]], tf.int32)

    with self.assertRaises(ValueError):
      _ = box_list.BoxList(data0)
    with self.assertRaises(ValueError):
      _ = box_list.BoxList(data1)
    with self.assertRaises(ValueError):
      _ = box_list.BoxList(data2)

  def test_num_boxes_static(self):
    box_corners = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
    boxes = box_list.BoxList(tf.constant(box_corners))
    self.assertEquals(boxes.num_boxes_static(), 2)
    self.assertEquals(type(boxes.num_boxes_static()), int)

  def test_num_boxes_static_for_uninferrable_shape(self):
    placeholder = tf.placeholder(tf.float32, shape=[None, 4])
    boxes = box_list.BoxList(placeholder)
    self.assertEquals(boxes.num_boxes_static(), None)

  def test_as_tensor_dict(self):
    boxlist = box_list.BoxList(
        tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
    boxlist.add_field('classes', tf.constant([0, 1]))
    boxlist.add_field('scores', tf.constant([0.75, 0.2]))
    tensor_dict = boxlist.as_tensor_dict()

    expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]]
    expected_classes = [0, 1]
    expected_scores = [0.75, 0.2]

    with self.test_session() as sess:
      tensor_dict_out = sess.run(tensor_dict)
      self.assertAllEqual(3, len(tensor_dict_out))
      self.assertAllClose(expected_boxes, tensor_dict_out['boxes'])
      self.assertAllEqual(expected_classes, tensor_dict_out['classes'])
      self.assertAllClose(expected_scores, tensor_dict_out['scores'])

  def test_as_tensor_dict_with_features(self):
    boxlist = box_list.BoxList(
        tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
    boxlist.add_field('classes', tf.constant([0, 1]))
    boxlist.add_field('scores', tf.constant([0.75, 0.2]))
    tensor_dict = boxlist.as_tensor_dict(['boxes', 'classes', 'scores'])

    expected_boxes = [[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]]
    expected_classes = [0, 1]
    expected_scores = [0.75, 0.2]

    with self.test_session() as sess:
      tensor_dict_out = sess.run(tensor_dict)
      self.assertAllEqual(3, len(tensor_dict_out))
      self.assertAllClose(expected_boxes, tensor_dict_out['boxes'])
      self.assertAllEqual(expected_classes, tensor_dict_out['classes'])
      self.assertAllClose(expected_scores, tensor_dict_out['scores'])

  def test_as_tensor_dict_missing_field(self):
    boxlist = box_list.BoxList(
        tf.constant([[0.1, 0.1, 0.4, 0.4], [0.1, 0.1, 0.5, 0.5]], tf.float32))
    boxlist.add_field('classes', tf.constant([0, 1]))
    boxlist.add_field('scores', tf.constant([0.75, 0.2]))
    with self.assertRaises(ValueError):
      boxlist.as_tensor_dict(['foo', 'bar'])


if __name__ == '__main__':
  tf.test.main()
# ------------------------------------------------------------------------------
# repo_name: DeepLearningExamples-master
# file_path: TensorFlow/Detection/SSD/models/research/object_detection/core/box_list_test.py
# ------------------------------------------------------------------------------
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Base minibatch sampler module.

The job of the minibatch_sampler is to subsample a minibatch based on some
criterion.

The main function call is:
    subsample(indicator, batch_size, **params).
Indicator is a 1d boolean tensor where True denotes which examples can be
sampled. It returns a boolean indicator where True denotes an example has been
sampled.

Subclasses should implement the Subsample function and can make use of the
@staticmethod SubsampleIndicator.
"""

from abc import ABCMeta
from abc import abstractmethod

import tensorflow as tf

from object_detection.utils import ops


class MinibatchSampler(object):
  """Abstract base class for subsampling minibatches."""
  __metaclass__ = ABCMeta

  def __init__(self):
    """Constructs a minibatch sampler."""
    pass

  @abstractmethod
  def subsample(self, indicator, batch_size, **params):
    """Returns subsample of entries in indicator.

    Args:
      indicator: boolean tensor of shape [N] whose True entries can be sampled.
      batch_size: desired batch size.
      **params: additional keyword arguments for specific implementations of
          the MinibatchSampler.

    Returns:
      sample_indicator: boolean tensor of shape [N] whose True entries have
          been sampled. If sum(indicator) >= batch_size, sum(is_sampled) =
          batch_size
    """
    pass

  @staticmethod
  def subsample_indicator(indicator, num_samples):
    """Subsample indicator vector.

    Given a boolean indicator vector with M elements set to `True`, the
    function assigns all but `num_samples` of these previously `True` elements
    to `False`. If `num_samples` is greater than M, the original indicator
    vector is returned.

    Args:
      indicator: a 1-dimensional boolean tensor indicating which elements
        are allowed to be sampled and which are not.
      num_samples: int32 scalar tensor

    Returns:
      a boolean tensor with the same shape as input (indicator) tensor
    """
    indices = tf.where(indicator)
    indices = tf.random_shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices,
                                                     tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1)
# ------------------------------------------------------------------------------
# repo_name: DeepLearningExamples-master
# file_path: TensorFlow/Detection/SSD/models/research/object_detection/core/minibatch_sampler.py
# ------------------------------------------------------------------------------
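# Illustrative usage sketch (not from the repo above): subsample_indicator
# keeps at most `num_samples` of the True entries of a boolean vector, chosen
# uniformly at random, and zeroes out the rest. Assumes a TF 1.x environment,
# matching the code above.
import tensorflow as tf

from object_detection.core import minibatch_sampler

indicator = tf.constant([True, False, True, True, False, True])
sampled = minibatch_sampler.MinibatchSampler.subsample_indicator(
    indicator, num_samples=2)
with tf.Session() as sess:
  # Exactly 2 of the 4 True positions remain True; which ones is random.
  print(sess.run(sampled))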
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Preprocess images and bounding boxes for detection.

We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
    data augmentation.

A preprocessing function receives a set of inputs, e.g. an image and bounding
boxes, performs an operation on them, and returns them. Some examples are:
randomly cropping the image, randomly mirroring the image, randomly changing
the brightness, contrast, hue and randomly jittering the bounding boxes.

The preprocess function receives a tensor_dict which is a dictionary that maps
different field names to their tensors. For example,
tensor_dict[fields.InputDataFields.image] holds the image tensor.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates, meaning their coordinate values range
in [0, 1].

To preprocess multiple images with the same operations in cases where
nondeterministic operations are used, a preprocessor_cache.PreprocessorCache
object can be passed into the preprocess function or individual operations.
All nondeterministic operations except random_jitter_boxes support caching.
E.g.
Let tensor_dict{1,2,3,4,5} be copies of the same inputs.
Let preprocess_options contain nondeterministic operation(s) excluding
random_jitter_boxes.

cache1 = preprocessor_cache.PreprocessorCache()
cache2 = preprocessor_cache.PreprocessorCache()
a = preprocess(tensor_dict1, preprocess_options, preprocess_vars_cache=cache1)
b = preprocess(tensor_dict2, preprocess_options, preprocess_vars_cache=cache1)
c = preprocess(tensor_dict3, preprocess_options, preprocess_vars_cache=cache2)
d = preprocess(tensor_dict4, preprocess_options, preprocess_vars_cache=cache2)
e = preprocess(tensor_dict5, preprocess_options)

Then the corresponding tensors of object pairs (a, b) and (c, d) are
guaranteed to be equal element-wise, but the equality of any other object
pair cannot be determined.

Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
""" import functools import inspect import sys import tensorflow as tf from tensorflow.python.ops import control_flow_ops from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import keypoint_ops from object_detection.core import preprocessor_cache from object_detection.core import standard_fields as fields from object_detection.utils import shape_utils def _apply_with_random_selector(x, func, num_cases, preprocess_vars_cache=None, key=''): """Computes func(x, sel), with sel sampled from [0...num_cases-1]. If both preprocess_vars_cache AND key are the same between two calls, sel will be the same value in both calls. Args: x: input Tensor. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: variable identifier for preprocess_vars_cache. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. """ generator_func = functools.partial( tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) rand_sel = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.SELECTOR, preprocess_vars_cache, key) # Pass the real x only to one of the func calls. return control_flow_ops.merge([func( control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case) for case in range(num_cases)])[0] def _apply_with_random_selector_tuples(x, func, num_cases, preprocess_vars_cache=None, key=''): """Computes func(x, sel), with sel sampled from [0...num_cases-1]. If both preprocess_vars_cache AND key are the same between two calls, sel will be the same value in both calls. Args: x: A tuple of input tensors. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: variable identifier for preprocess_vars_cache. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. """ num_inputs = len(x) generator_func = functools.partial( tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) rand_sel = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.SELECTOR_TUPLES, preprocess_vars_cache, key) # Pass the real x only to one of the func calls. tuples = [list() for t in x] for case in range(num_cases): new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x] output = func(tuple(new_x), case) for j in range(num_inputs): tuples[j].append(output[j]) for i in range(num_inputs): tuples[i] = control_flow_ops.merge(tuples[i])[0] return tuple(tuples) def _get_or_create_preprocess_rand_vars(generator_func, function_id, preprocess_vars_cache, key=''): """Returns a tensor stored in preprocess_vars_cache or using generator_func. If the tensor was previously generated and appears in the PreprocessorCache, the previously generated tensor will be returned. Otherwise, a new tensor is generated using generator_func and stored in the cache. Args: generator_func: A 0-argument function that generates a tensor. 
    function_id: identifier for the preprocessing function used.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.
    key: identifier for the variable stored.

  Returns:
    The generated tensor.
  """
  if preprocess_vars_cache is not None:
    var = preprocess_vars_cache.get(function_id, key)
    if var is None:
      var = generator_func()
      preprocess_vars_cache.update(function_id, key, var)
  else:
    var = generator_func()
  return var


def _random_integer(minval, maxval, seed):
  """Returns a random 0-D tensor between minval and maxval.

  Args:
    minval: minimum value of the random tensor.
    maxval: maximum value of the random tensor.
    seed: random seed.

  Returns:
    A random 0-D tensor between minval and maxval.
  """
  return tf.random_uniform(
      [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)


# TODO(mttang): This method is needed because the current
# tf.image.rgb_to_grayscale method does not support quantization. Replace with
# tf.image.rgb_to_grayscale after quantization support is added.
def _rgb_to_grayscale(images, name=None):
  """Converts one or more images from RGB to Grayscale.

  Outputs a tensor of the same `DType` and rank as `images`. The size of the
  last dimension of the output is 1, containing the Grayscale value of the
  pixels.

  Args:
    images: The RGB tensor to convert. Last dimension must have size 3 and
      should contain RGB values.
    name: A name for the operation (optional).

  Returns:
    The converted grayscale image(s).
  """
  with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name:
    images = tf.convert_to_tensor(images, name='images')
    # Remember original dtype so we can convert back if needed
    orig_dtype = images.dtype
    flt_image = tf.image.convert_image_dtype(images, tf.float32)

    # Reference for converting between RGB and grayscale.
    # https://en.wikipedia.org/wiki/Luma_%28video%29
    rgb_weights = [0.2989, 0.5870, 0.1140]
    rank_1 = tf.expand_dims(tf.rank(images) - 1, 0)
    gray_float = tf.reduce_sum(
        flt_image * rgb_weights, rank_1, keep_dims=True)
    gray_float.set_shape(images.get_shape()[:-1].concatenate([1]))
    return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name)


def normalize_image(image, original_minval, original_maxval, target_minval,
                    target_maxval):
  """Normalizes pixel values in the image.

  Moves the pixel values from the current [original_minval, original_maxval]
  range to the [target_minval, target_maxval] range.

  Args:
    image: rank 3 float32 tensor containing 1
           image -> [height, width, channels].
    original_minval: current image minimum value.
    original_maxval: current image maximum value.
    target_minval: target image minimum value.
    target_maxval: target image maximum value.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('NormalizeImage', values=[image]):
    original_minval = float(original_minval)
    original_maxval = float(original_maxval)
    target_minval = float(target_minval)
    target_maxval = float(target_maxval)
    image = tf.to_float(image)
    image = tf.subtract(image, original_minval)
    image = tf.multiply(image, (target_maxval - target_minval) /
                        (original_maxval - original_minval))
    image = tf.add(image, target_minval)
    return image


def retain_boxes_above_threshold(boxes,
                                 labels,
                                 label_weights,
                                 label_confidences=None,
                                 multiclass_scores=None,
                                 masks=None,
                                 keypoints=None,
                                 threshold=0.0):
  """Retains boxes whose label weight is above a given threshold.

  If the label weight for a box is missing (represented by NaN), the box is
  retained. The boxes that don't pass the threshold will not appear in the
  returned tensor.

  Args:
    boxes: float32 tensor of shape [num_instance, 4] representing boxes
      location in normalized coordinates.
    labels: rank 1 int32 tensor of shape [num_instance] containing the object
      classes.
    label_weights: float32 tensor of shape [num_instance] representing the
      weight for each box.
    label_confidences: float32 tensor of shape [num_instance] representing the
      confidence for each box.
    multiclass_scores: (optional) float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for each
      class.
    masks: (optional) rank 3 float32 tensor with shape
      [num_instances, height, width] containing instance masks. The masks are
      of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
      [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
      coordinates.
    threshold: scalar python float.

  Returns:
    retained_boxes: [num_retained_instance, 4]
    retained_labels: [num_retained_instance]
    retained_label_weights: [num_retained_instance]

    If multiclass_scores, masks, or keypoints are not None, the function also
    returns:

    retained_multiclass_scores: [num_retained_instance, num_classes]
    retained_masks: [num_retained_instance, height, width]
    retained_keypoints: [num_retained_instance, num_keypoints, 2]
  """
  with tf.name_scope('RetainBoxesAboveThreshold',
                     values=[boxes, labels, label_weights]):
    indices = tf.where(
        tf.logical_or(label_weights > threshold, tf.is_nan(label_weights)))
    indices = tf.squeeze(indices, axis=1)
    retained_boxes = tf.gather(boxes, indices)
    retained_labels = tf.gather(labels, indices)
    retained_label_weights = tf.gather(label_weights, indices)
    result = [retained_boxes, retained_labels, retained_label_weights]

    if label_confidences is not None:
      retained_label_confidences = tf.gather(label_confidences, indices)
      result.append(retained_label_confidences)

    if multiclass_scores is not None:
      retained_multiclass_scores = tf.gather(multiclass_scores, indices)
      result.append(retained_multiclass_scores)

    if masks is not None:
      retained_masks = tf.gather(masks, indices)
      result.append(retained_masks)

    if keypoints is not None:
      retained_keypoints = tf.gather(keypoints, indices)
      result.append(retained_keypoints)

    return result


def _flip_boxes_left_right(boxes):
  """Left-right flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_xmin = tf.subtract(1.0, xmax)
  flipped_xmax = tf.subtract(1.0, xmin)
  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
  return flipped_boxes


def _flip_boxes_up_down(boxes):
  """Up-down flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_ymin = tf.subtract(1.0, ymax)
  flipped_ymax = tf.subtract(1.0, ymin)
  flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
  return flipped_boxes


def _rot90_boxes(boxes):
  """Rotate boxes counter-clockwise by 90 degrees.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Rotated boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  rotated_ymin = tf.subtract(1.0, xmax)
  rotated_ymax = tf.subtract(1.0, xmin)
  rotated_xmin = ymin
  rotated_xmax = ymax
  rotated_boxes = tf.concat(
      [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1)
  return rotated_boxes


def _flip_masks_left_right(masks):
  """Left-right flip masks.

  Args:
    masks: rank 3 float32 tensor with shape
      [num_instances, height, width] representing instance masks.

  Returns:
    flipped masks: rank 3 float32 tensor with shape
      [num_instances, height, width] representing instance masks.
  """
  return masks[:, :, ::-1]


def _flip_masks_up_down(masks):
  """Up-down flip masks.

  Args:
    masks: rank 3 float32 tensor with shape
      [num_instances, height, width] representing instance masks.

  Returns:
    flipped masks: rank 3 float32 tensor with shape
      [num_instances, height, width] representing instance masks.
  """
  return masks[:, ::-1, :]


def _rot90_masks(masks):
  """Rotate masks counter-clockwise by 90 degrees.

  Args:
    masks: rank 3 float32 tensor with shape
      [num_instances, height, width] representing instance masks.

  Returns:
    rotated masks: rank 3 float32 tensor with shape
      [num_instances, height, width] representing instance masks.
  """
  masks = tf.transpose(masks, [0, 2, 1])
  return masks[:, ::-1, :]


def random_horizontal_flip(image,
                           boxes=None,
                           masks=None,
                           keypoints=None,
                           keypoint_flip_permutation=None,
                           seed=None,
                           preprocess_vars_cache=None):
  """Randomly flips the image and detections horizontally.

  The probability of flipping the image is 50%.

  Args:
    image: rank 3 float32 tensor with shape [height, width, channels].
    boxes: (optional) rank 2 float32 tensor with shape [N, 4]
           containing the bounding boxes.
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].
    masks: (optional) rank 3 float32 tensor with shape
           [num_instances, height, width] containing instance masks. The masks
           are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]. The keypoints are in y-x
               normalized coordinates.
    keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
                               permutation.
    seed: random seed
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same shape as input image.

    If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
    the function also returns the following tensors.

    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
           containing instance masks.
    keypoints: rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]

  Raises:
    ValueError: if keypoints are provided but keypoint_flip_permutation is not.
""" def _flip_image(image): # flip image image_flipped = tf.image.flip_left_right(image) return image_flipped if keypoints is not None and keypoint_flip_permutation is None: raise ValueError( 'keypoints are provided but keypoints_flip_permutation is not provided') with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]): result = [] # random variable defining whether to do flip or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_flip_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.HORIZONTAL_FLIP, preprocess_vars_cache) do_a_flip_random = tf.greater(do_a_flip_random, 0.5) # flip image image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None and keypoint_flip_permutation is not None: permutation = keypoint_flip_permutation keypoints = tf.cond( do_a_flip_random, lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation), lambda: keypoints) result.append(keypoints) return tuple(result) def random_vertical_flip(image, boxes=None, masks=None, keypoints=None, keypoint_flip_permutation=None, seed=None, preprocess_vars_cache=None): """Randomly flips the image and detections vertically. The probability of flipping the image is 50%. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. seed: random seed preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. If boxes, masks, keypoints, and keypoint_flip_permutation are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] Raises: ValueError: if keypoints are provided but keypoint_flip_permutation is not. 
""" def _flip_image(image): # flip image image_flipped = tf.image.flip_up_down(image) return image_flipped if keypoints is not None and keypoint_flip_permutation is None: raise ValueError( 'keypoints are provided but keypoints_flip_permutation is not provided') with tf.name_scope('RandomVerticalFlip', values=[image, boxes]): result = [] # random variable defining whether to do flip or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_flip_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.VERTICAL_FLIP, preprocess_vars_cache) do_a_flip_random = tf.greater(do_a_flip_random, 0.5) # flip image image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None and keypoint_flip_permutation is not None: permutation = keypoint_flip_permutation keypoints = tf.cond( do_a_flip_random, lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation), lambda: keypoints) result.append(keypoints) return tuple(result) def random_rotation90(image, boxes=None, masks=None, keypoints=None, seed=None, preprocess_vars_cache=None): """Randomly rotates the image and detections 90 degrees counter-clockwise. The probability of rotating the image is 50%. This can be combined with random_horizontal_flip and random_vertical_flip to produce an output with a uniform distribution of the eight possible 90 degree rotation / reflection combinations. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. seed: random seed preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. If boxes, masks, and keypoints, are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. 
    keypoints: rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]
  """

  def _rot90_image(image):
    # flip image
    image_rotated = tf.image.rot90(image)
    return image_rotated

  with tf.name_scope('RandomRotation90', values=[image, boxes]):
    result = []

    # random variable defining whether to rotate by 90 degrees or not
    generator_func = functools.partial(tf.random_uniform, [], seed=seed)
    do_a_rot90_random = _get_or_create_preprocess_rand_vars(
        generator_func, preprocessor_cache.PreprocessorCache.ROTATION90,
        preprocess_vars_cache)
    do_a_rot90_random = tf.greater(do_a_rot90_random, 0.5)

    # flip image
    image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image),
                    lambda: image)
    result.append(image)

    # flip boxes
    if boxes is not None:
      boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes),
                      lambda: boxes)
      result.append(boxes)

    # flip masks
    if masks is not None:
      masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks),
                      lambda: masks)
      result.append(masks)

    # flip keypoints
    if keypoints is not None:
      keypoints = tf.cond(
          do_a_rot90_random,
          lambda: keypoint_ops.rot90(keypoints),
          lambda: keypoints)
      result.append(keypoints)

    return tuple(result)


def random_pixel_value_scale(image,
                             minval=0.9,
                             maxval=1.1,
                             seed=None,
                             preprocess_vars_cache=None):
  """Scales each value in the pixels of the image.

  This function scales each pixel independent of the other ones.
  For each value in image tensor, draws a random number between
  minval and maxval and multiplies the values with them.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 255].
    minval: lower ratio of scaling pixel values.
    maxval: upper ratio of scaling pixel values.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomPixelValueScale', values=[image]):
    generator_func = functools.partial(
        tf.random_uniform, tf.shape(image),
        minval=minval, maxval=maxval,
        dtype=tf.float32, seed=seed)
    color_coef = _get_or_create_preprocess_rand_vars(
        generator_func,
        preprocessor_cache.PreprocessorCache.PIXEL_VALUE_SCALE,
        preprocess_vars_cache)

    image = tf.multiply(image, color_coef)
    image = tf.clip_by_value(image, 0.0, 255.0)

  return image


def random_image_scale(image,
                       masks=None,
                       min_scale_ratio=0.5,
                       max_scale_ratio=2.0,
                       seed=None,
                       preprocess_vars_cache=None):
  """Scales the image size.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels].
    masks: (optional) rank 3 float32 tensor containing masks with
      size [height, width, num_masks]. The value is set to None if there are no
      masks.
    min_scale_ratio: minimum scaling ratio.
    max_scale_ratio: maximum scaling ratio.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same rank as input image.
    masks: If masks is not none, resized masks which are the same rank as input
      masks will be returned.
""" with tf.name_scope('RandomImageScale', values=[image]): result = [] image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] generator_func = functools.partial( tf.random_uniform, [], minval=min_scale_ratio, maxval=max_scale_ratio, dtype=tf.float32, seed=seed) size_coef = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.IMAGE_SCALE, preprocess_vars_cache) image_newysize = tf.to_int32( tf.multiply(tf.to_float(image_height), size_coef)) image_newxsize = tf.to_int32( tf.multiply(tf.to_float(image_width), size_coef)) image = tf.image.resize_images( image, [image_newysize, image_newxsize], align_corners=True) result.append(image) if masks is not None: masks = tf.image.resize_images( masks, [image_newysize, image_newxsize], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=True) result.append(masks) return tuple(result) def random_rgb_to_gray(image, probability=0.1, seed=None, preprocess_vars_cache=None): """Changes the image from RGB to Grayscale with the given probability. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. probability: the probability of returning a grayscale image. The probability should be a number between [0, 1]. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ def _image_to_gray(image): image_gray1 = _rgb_to_grayscale(image) image_gray3 = tf.image.grayscale_to_rgb(image_gray1) return image_gray3 with tf.name_scope('RandomRGBtoGray', values=[image]): # random variable defining whether to change to grayscale or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_gray_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.RGB_TO_GRAY, preprocess_vars_cache) image = tf.cond( tf.greater(do_gray_random, probability), lambda: image, lambda: _image_to_gray(image)) return image def random_adjust_brightness(image, max_delta=0.2, seed=None, preprocess_vars_cache=None): """Randomly adjusts brightness. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. max_delta: how much to change the brightness. A value between [0, 1). seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. boxes: boxes which is the same shape as input boxes. """ with tf.name_scope('RandomAdjustBrightness', values=[image]): generator_func = functools.partial(tf.random_uniform, [], -max_delta, max_delta, seed=seed) delta = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_BRIGHTNESS, preprocess_vars_cache) image = tf.image.adjust_brightness(image / 255, delta) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image def random_adjust_contrast(image, min_delta=0.8, max_delta=1.25, seed=None, preprocess_vars_cache=None): """Randomly adjusts contrast. 

  Makes sure the output image is still between 0 and 255.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 255].
    min_delta: see max_delta.
    max_delta: how much to change the contrast. Contrast will change with a
               value between min_delta and max_delta. This value will be
               multiplied to the current contrast of the image.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustContrast', values=[image]):
    generator_func = functools.partial(tf.random_uniform, [],
                                       min_delta, max_delta, seed=seed)
    contrast_factor = _get_or_create_preprocess_rand_vars(
        generator_func,
        preprocessor_cache.PreprocessorCache.ADJUST_CONTRAST,
        preprocess_vars_cache)

    image = tf.image.adjust_contrast(image / 255, contrast_factor) * 255
    image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
    return image


def random_adjust_hue(image,
                      max_delta=0.02,
                      seed=None,
                      preprocess_vars_cache=None):
  """Randomly adjusts hue.

  Makes sure the output image is still between 0 and 255.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 255].
    max_delta: change hue randomly with a value between 0 and max_delta.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('RandomAdjustHue', values=[image]):
    generator_func = functools.partial(tf.random_uniform, [],
                                       -max_delta, max_delta, seed=seed)
    delta = _get_or_create_preprocess_rand_vars(
        generator_func, preprocessor_cache.PreprocessorCache.ADJUST_HUE,
        preprocess_vars_cache)

    image = tf.image.adjust_hue(image / 255, delta) * 255
    image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0)
    return image


def random_adjust_saturation(image,
                             min_delta=0.8,
                             max_delta=1.25,
                             seed=None,
                             preprocess_vars_cache=None):
  """Randomly adjusts saturation.

  Makes sure the output image is still between 0 and 255.

  Args:
    image: rank 3 float32 tensor contains 1 image -> [height, width, channels]
           with pixel values varying between [0, 255].
    min_delta: see max_delta.
    max_delta: how much to change the saturation. Saturation will change with a
               value between min_delta and max_delta. This value will be
               multiplied to the current saturation of the image.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same shape as input image.
""" with tf.name_scope('RandomAdjustSaturation', values=[image]): generator_func = functools.partial(tf.random_uniform, [], min_delta, max_delta, seed=seed) saturation_factor = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_SATURATION, preprocess_vars_cache) image = tf.image.adjust_saturation(image / 255, saturation_factor) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image def random_distort_color(image, color_ordering=0, preprocess_vars_cache=None): """Randomly distorts color. Randomly distorts color using a combination of brightness, hue, contrast and saturation changes. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. color_ordering: Python int, a type of distortion (valid values: 0, 1). preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. Raises: ValueError: if color_ordering is not in {0, 1}. """ with tf.name_scope('RandomDistortColor', values=[image]): if color_ordering == 0: image = random_adjust_brightness( image, max_delta=32. / 255., preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_saturation( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_hue( image, max_delta=0.2, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_contrast( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) elif color_ordering == 1: image = random_adjust_brightness( image, max_delta=32. / 255., preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_contrast( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_saturation( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_hue( image, max_delta=0.2, preprocess_vars_cache=preprocess_vars_cache) else: raise ValueError('color_ordering must be in {0, 1}') return image def random_jitter_boxes(boxes, ratio=0.05, seed=None): """Randomly jitter boxes in image. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. ratio: The ratio of the box width and height that the corners can jitter. For example if the width is 100 pixels and ratio is 0.05, the corners can jitter up to 5 pixels in the x direction. seed: random seed. Returns: boxes: boxes which is the same shape as input boxes. """ def random_jitter_box(box, ratio, seed): """Randomly jitter box. Args: box: bounding box [1, 1, 4]. ratio: max ratio between jittered box and original box, a number between [0, 0.5]. seed: random seed. Returns: jittered_box: jittered box. 
""" rand_numbers = tf.random_uniform( [1, 1, 4], minval=-ratio, maxval=ratio, dtype=tf.float32, seed=seed) box_width = tf.subtract(box[0, 0, 3], box[0, 0, 1]) box_height = tf.subtract(box[0, 0, 2], box[0, 0, 0]) hw_coefs = tf.stack([box_height, box_width, box_height, box_width]) hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers) jittered_box = tf.add(box, hw_rand_coefs) jittered_box = tf.clip_by_value(jittered_box, 0.0, 1.0) return jittered_box with tf.name_scope('RandomJitterBoxes', values=[boxes]): # boxes are [N, 4]. Lets first make them [N, 1, 1, 4] boxes_shape = tf.shape(boxes) boxes = tf.expand_dims(boxes, 1) boxes = tf.expand_dims(boxes, 2) distorted_boxes = tf.map_fn( lambda x: random_jitter_box(x, ratio, seed), boxes, dtype=tf.float32) distorted_boxes = tf.reshape(distorted_boxes, boxes_shape) return distorted_boxes def _strict_random_crop_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, preprocess_vars_cache=None): """Performs random crop. Note: Keypoint coordinates that are outside the crop will be set to NaN, which is consistent with the original keypoint encoding for non-existing keypoints. This function always crops the image and is supposed to be used by `random_crop_image` function which sometimes returns the image unchanged. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes with shape [num_instances, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, multiclass_scores, masks, or keypoints is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. 

def _strict_random_crop_image(image,
                              boxes,
                              labels,
                              label_weights,
                              label_confidences=None,
                              multiclass_scores=None,
                              masks=None,
                              keypoints=None,
                              min_object_covered=1.0,
                              aspect_ratio_range=(0.75, 1.33),
                              area_range=(0.1, 1.0),
                              overlap_thresh=0.3,
                              clip_boxes=True,
                              preprocess_vars_cache=None):
  """Performs random crop.

  Note: Keypoint coordinates that are outside the crop will be set to NaN,
  which is consistent with the original keypoint encoding for non-existing
  keypoints. This function always crops the image and is supposed to be used
  by `random_crop_image` function which sometimes returns the image unchanged.

  Args:
    image: rank 3 float32 tensor containing 1 image ->
           [height, width, channels] with pixel values varying
           between [0, 1].
    boxes: rank 2 float32 tensor containing the bounding boxes with shape
           [num_instances, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    label_weights: float32 tensor of shape [num_instances] representing the
      weight for each box.
    label_confidences: (optional) float32 tensor of shape [num_instances]
      representing the confidence for each box.
    multiclass_scores: (optional) float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for
      each class.
    masks: (optional) rank 3 float32 tensor with shape
           [num_instances, height, width] containing instance masks. The
           masks are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]. The keypoints are in y-x
               normalized coordinates.
    min_object_covered: the cropped image must cover at least this fraction
                        of at least one of the input bounding boxes.
    aspect_ratio_range: allowed range for aspect ratio of cropped image.
    area_range: allowed range for area ratio between cropped image and the
                original image.
    overlap_thresh: minimum overlap thresh with new cropped
                    image to keep the box.
    clip_boxes: whether to clip the boxes to the cropped image.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same rank as input image.
    boxes: boxes which is the same rank as input boxes.
           Boxes are in normalized form.
    labels: new labels.

    If label_weights, multiclass_scores, masks, or keypoints is not None, the
    function also returns:
    label_weights: rank 1 float32 tensor with shape [num_instances].
    multiclass_scores: rank 2 float32 tensor with shape
                       [num_instances, num_classes]
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
           containing instance masks.
    keypoints: rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]
  """
  with tf.name_scope('RandomCropImage', values=[image, boxes]):
    image_shape = tf.shape(image)

    # boxes are [N, 4]. Let's first make them [N, 1, 4].
    boxes_expanded = tf.expand_dims(
        tf.clip_by_value(
            boxes, clip_value_min=0.0, clip_value_max=1.0), 1)

    generator_func = functools.partial(
        tf.image.sample_distorted_bounding_box,
        image_shape,
        bounding_boxes=boxes_expanded,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=100,
        use_image_if_no_bounding_boxes=True)

    # for ssd cropping, each value of min_object_covered has its own
    # cached random variable
    sample_distorted_bounding_box = _get_or_create_preprocess_rand_vars(
        generator_func,
        preprocessor_cache.PreprocessorCache.STRICT_CROP_IMAGE,
        preprocess_vars_cache, key=min_object_covered)

    im_box_begin, im_box_size, im_box = sample_distorted_bounding_box

    new_image = tf.slice(image, im_box_begin, im_box_size)
    new_image.set_shape([None, None, image.get_shape()[2]])

    # [1, 4]
    im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0])
    # [4]
    im_box_rank1 = tf.squeeze(im_box)

    boxlist = box_list.BoxList(boxes)
    boxlist.add_field('labels', labels)

    if label_weights is not None:
      boxlist.add_field('label_weights', label_weights)

    if label_confidences is not None:
      boxlist.add_field('label_confidences', label_confidences)

    if multiclass_scores is not None:
      boxlist.add_field('multiclass_scores', multiclass_scores)

    im_boxlist = box_list.BoxList(im_box_rank2)

    # remove boxes that are completely outside cropped image
    boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(
        boxlist, im_box_rank1)

    # remove boxes whose overlap with the cropped image is below
    # overlap_thresh
    overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
        boxlist, im_boxlist, overlap_thresh)

    # change the coordinate of the remaining boxes
    new_labels = overlapping_boxlist.get_field('labels')
    new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
                                                       im_box_rank1)
    new_boxes = new_boxlist.get()
    if clip_boxes:
      new_boxes = tf.clip_by_value(
          new_boxes, clip_value_min=0.0, clip_value_max=1.0)

    result = [new_image, new_boxes, new_labels]

    if label_weights is not None:
      new_label_weights = overlapping_boxlist.get_field('label_weights')
      result.append(new_label_weights)

    if label_confidences is not None:
      new_label_confidences = overlapping_boxlist.get_field(
          'label_confidences')
      result.append(new_label_confidences)

    if multiclass_scores is not None:
      new_multiclass_scores = overlapping_boxlist.get_field(
          'multiclass_scores')
      result.append(new_multiclass_scores)

    if masks is not None:
      masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)
      masks_of_boxes_completely_inside_window = tf.gather(
          masks_of_boxes_inside_window, keep_ids)
      masks_box_begin = [0, im_box_begin[0], im_box_begin[1]]
      masks_box_size = [-1, im_box_size[0], im_box_size[1]]
      new_masks = tf.slice(
          masks_of_boxes_completely_inside_window,
          masks_box_begin, masks_box_size)
      result.append(new_masks)

    if keypoints is not None:
      keypoints_of_boxes_inside_window = tf.gather(keypoints,
                                                   inside_window_ids)
      keypoints_of_boxes_completely_inside_window = tf.gather(
          keypoints_of_boxes_inside_window, keep_ids)
      new_keypoints = keypoint_ops.change_coordinate_frame(
          keypoints_of_boxes_completely_inside_window, im_box_rank1)
      if clip_boxes:
        new_keypoints = keypoint_ops.prune_outside_window(
            new_keypoints, [0.0, 0.0, 1.0, 1.0])
      result.append(new_keypoints)

    return tuple(result)

def random_crop_image(image,
                      boxes,
                      labels,
                      label_weights,
                      label_confidences=None,
                      multiclass_scores=None,
                      masks=None,
                      keypoints=None,
                      min_object_covered=1.0,
                      aspect_ratio_range=(0.75, 1.33),
                      area_range=(0.1, 1.0),
                      overlap_thresh=0.3,
                      clip_boxes=True,
                      random_coef=0.0,
                      seed=None,
                      preprocess_vars_cache=None):
  """Randomly crops the image.

  Given the input image and its bounding boxes, this op randomly crops a
  subimage. Given a user-provided set of input constraints, the crop window
  is resampled until it satisfies these constraints. If within 100 trials it
  is unable to find a valid crop, the original image is returned. See the
  Args section for a description of the input constraints. Both input boxes
  and returned boxes are in normalized form (e.g., lie in the unit square
  [0, 1]). This function will return the original image with probability
  random_coef.

  Note: Keypoint coordinates that are outside the crop will be set to NaN,
  which is consistent with the original keypoint encoding for non-existing
  keypoints.

  Args:
    image: rank 3 float32 tensor contains 1 image ->
           [height, width, channels] with pixel values varying
           between [0, 1].
    boxes: rank 2 float32 tensor containing the bounding boxes with shape
           [num_instances, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    label_weights: float32 tensor of shape [num_instances] representing the
      weight for each box.
    label_confidences: (optional) float32 tensor of shape [num_instances]
      representing the confidence for each box.
    multiclass_scores: (optional) float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for
      each class.
    masks: (optional) rank 3 float32 tensor with shape
           [num_instances, height, width] containing instance masks. The
           masks are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]. The keypoints are in y-x
               normalized coordinates.
    min_object_covered: the cropped image must cover at least this fraction
                        of at least one of the input bounding boxes.
    aspect_ratio_range: allowed range for aspect ratio of cropped image.
    area_range: allowed range for area ratio between cropped image and the
                original image.
    overlap_thresh: minimum overlap thresh with new cropped
                    image to keep the box.
    clip_boxes: whether to clip the boxes to the cropped image.
    random_coef: a random coefficient that defines the chance of getting the
                 original image. If random_coef is 0, we will always get the
                 cropped image, and if it is 1.0, we will always get the
                 original image.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: Image shape will be [new_height, new_width, channels].
    boxes: boxes which is the same rank as input boxes. Boxes are in
           normalized form.
    labels: new labels.

    If label_weights, multiclass_scores, masks, or keypoints is not None, the
    function also returns:
    label_weights: rank 1 float32 tensor with shape [num_instances].
    multiclass_scores: rank 2 float32 tensor with shape
                       [num_instances, num_classes]
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
           containing instance masks.
    keypoints: rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]
  """

  def strict_random_crop_image_fn():
    return _strict_random_crop_image(
        image,
        boxes,
        labels,
        label_weights,
        label_confidences=label_confidences,
        multiclass_scores=multiclass_scores,
        masks=masks,
        keypoints=keypoints,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        overlap_thresh=overlap_thresh,
        clip_boxes=clip_boxes,
        preprocess_vars_cache=preprocess_vars_cache)

  # avoids tf.cond to make faster RCNN training on borg. See b/140057645.
  if random_coef < sys.float_info.min:
    result = strict_random_crop_image_fn()
  else:
    generator_func = functools.partial(tf.random_uniform, [], seed=seed)
    do_a_crop_random = _get_or_create_preprocess_rand_vars(
        generator_func,
        preprocessor_cache.PreprocessorCache.CROP_IMAGE,
        preprocess_vars_cache)
    do_a_crop_random = tf.greater(do_a_crop_random, random_coef)

    outputs = [image, boxes, labels]

    if label_weights is not None:
      outputs.append(label_weights)
    if label_confidences is not None:
      outputs.append(label_confidences)
    if multiclass_scores is not None:
      outputs.append(multiclass_scores)
    if masks is not None:
      outputs.append(masks)
    if keypoints is not None:
      outputs.append(keypoints)

    result = tf.cond(do_a_crop_random, strict_random_crop_image_fn,
                     lambda: tuple(outputs))
  return result
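
# --- Illustrative usage sketch (not part of the original module) ---
# Minimal call to random_crop_image. With random_coef=0.3 the original
# tensors are returned with probability ~0.3; otherwise a strict crop is
# performed. Shapes and values below are arbitrary.
def _example_random_crop_image():
  image = tf.random_uniform([480, 640, 3])  # pixel values in [0, 1)
  boxes = tf.constant([[0.1, 0.1, 0.9, 0.9],
                       [0.3, 0.4, 0.5, 0.6]], dtype=tf.float32)
  labels = tf.constant([1, 2], dtype=tf.int32)
  weights = tf.constant([1.0, 1.0], dtype=tf.float32)
  # Only label_weights is supplied among the optional fields, so the result
  # is a 4-tuple: (image, boxes, labels, label_weights).
  new_image, new_boxes, new_labels, new_weights = random_crop_image(
      image, boxes, labels, weights,
      min_object_covered=0.5,
      area_range=(0.3, 1.0),
      random_coef=0.3,
      seed=7)
  return new_image, new_boxes, new_labels, new_weights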
""" if pad_color is None: pad_color = tf.reduce_mean(image, axis=[0, 1]) image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] if max_image_size is None: max_image_size = tf.stack([image_height * 2, image_width * 2]) max_image_size = tf.maximum(max_image_size, tf.stack([image_height, image_width])) if min_image_size is None: min_image_size = tf.stack([image_height, image_width]) min_image_size = tf.maximum(min_image_size, tf.stack([image_height, image_width])) target_height = tf.cond( max_image_size[0] > min_image_size[0], lambda: _random_integer(min_image_size[0], max_image_size[0], seed), lambda: max_image_size[0]) target_width = tf.cond( max_image_size[1] > min_image_size[1], lambda: _random_integer(min_image_size[1], max_image_size[1], seed), lambda: max_image_size[1]) offset_height = tf.cond( target_height > image_height, lambda: _random_integer(0, target_height - image_height, seed), lambda: tf.constant(0, dtype=tf.int32)) offset_width = tf.cond( target_width > image_width, lambda: _random_integer(0, target_width - image_width, seed), lambda: tf.constant(0, dtype=tf.int32)) gen_func = lambda: (target_height, target_width, offset_height, offset_width) params = _get_or_create_preprocess_rand_vars( gen_func, preprocessor_cache.PreprocessorCache.PAD_IMAGE, preprocess_vars_cache) target_height, target_width, offset_height, offset_width = params new_image = tf.image.pad_to_bounding_box( image, offset_height=offset_height, offset_width=offset_width, target_height=target_height, target_width=target_width) # Setting color of the padded pixels image_ones = tf.ones_like(image) image_ones_padded = tf.image.pad_to_bounding_box( image_ones, offset_height=offset_height, offset_width=offset_width, target_height=target_height, target_width=target_width) image_color_padded = (1.0 - image_ones_padded) * pad_color new_image += image_color_padded # setting boxes new_window = tf.to_float( tf.stack([ -offset_height, -offset_width, target_height - offset_height, target_width - offset_width ])) new_window /= tf.to_float( tf.stack([image_height, image_width, image_height, image_width])) boxlist = box_list.BoxList(boxes) new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window) new_boxes = new_boxlist.get() return new_image, new_boxes def random_crop_pad_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, random_coef=0.0, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), pad_color=None, seed=None, preprocess_vars_cache=None): """Randomly crops and pads the image. Given an input image and its bounding boxes, this op first randomly crops the image and then randomly pads the image with background values. Parameters min_padded_size_ratio and max_padded_size_ratio, determine the range of the final output image size. Specifically, the final image size will have a size in the range of min_padded_size_ratio * tf.shape(image) and max_padded_size_ratio * tf.shape(image). Note that these ratios are with respect to the size of the original image, so we can't capture the same effect easily by independently applying RandomCropImage followed by RandomPadImage. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. 

def random_crop_pad_image(image,
                          boxes,
                          labels,
                          label_weights,
                          label_confidences=None,
                          multiclass_scores=None,
                          min_object_covered=1.0,
                          aspect_ratio_range=(0.75, 1.33),
                          area_range=(0.1, 1.0),
                          overlap_thresh=0.3,
                          clip_boxes=True,
                          random_coef=0.0,
                          min_padded_size_ratio=(1.0, 1.0),
                          max_padded_size_ratio=(2.0, 2.0),
                          pad_color=None,
                          seed=None,
                          preprocess_vars_cache=None):
  """Randomly crops and pads the image.

  Given an input image and its bounding boxes, this op first randomly crops
  the image and then randomly pads the image with background values.
  Parameters min_padded_size_ratio and max_padded_size_ratio determine the
  range of the final output image size. Specifically, the final image size
  will be in the range of min_padded_size_ratio * tf.shape(image) and
  max_padded_size_ratio * tf.shape(image). Note that these ratios are with
  respect to the size of the original image, so we can't capture the same
  effect easily by independently applying RandomCropImage followed by
  RandomPadImage.

  Args:
    image: rank 3 float32 tensor containing 1 image ->
           [height, width, channels] with pixel values varying
           between [0, 1].
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    label_weights: rank 1 float32 containing the label weights.
    label_confidences: rank 1 float32 containing the label confidences.
    multiclass_scores: (optional) float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for
      each class.
    min_object_covered: the cropped image must cover at least this fraction
                        of at least one of the input bounding boxes.
    aspect_ratio_range: allowed range for aspect ratio of cropped image.
    area_range: allowed range for area ratio between cropped image and the
                original image.
    overlap_thresh: minimum overlap thresh with new cropped
                    image to keep the box.
    clip_boxes: whether to clip the boxes to the cropped image.
    random_coef: a random coefficient that defines the chance of getting the
                 original image. If random_coef is 0, we will always get the
                 cropped image, and if it is 1.0, we will always get the
                 original image.
    min_padded_size_ratio: min ratio of padded image height and width to the
                           input image's height and width.
    max_padded_size_ratio: max ratio of padded image height and width to the
                           input image's height and width.
    pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
               If set as None, it will be set to average color of the
               randomly cropped image.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    padded_image: padded image.
    padded_boxes: boxes which is the same rank as input boxes. Boxes are in
                  normalized form.
    cropped_labels: cropped labels.
    if label_weights is not None also returns:
    cropped_label_weights: cropped label weights.
    if multiclass_scores is not None also returns:
    cropped_multiclass_scores: cropped_multiclass_scores.
  """
  image_size = tf.shape(image)
  image_height = image_size[0]
  image_width = image_size[1]
  result = random_crop_image(
      image=image,
      boxes=boxes,
      labels=labels,
      label_weights=label_weights,
      label_confidences=label_confidences,
      multiclass_scores=multiclass_scores,
      min_object_covered=min_object_covered,
      aspect_ratio_range=aspect_ratio_range,
      area_range=area_range,
      overlap_thresh=overlap_thresh,
      clip_boxes=clip_boxes,
      random_coef=random_coef,
      seed=seed,
      preprocess_vars_cache=preprocess_vars_cache)

  cropped_image, cropped_boxes, cropped_labels = result[:3]

  min_image_size = tf.to_int32(
      tf.to_float(tf.stack([image_height, image_width])) *
      min_padded_size_ratio)
  max_image_size = tf.to_int32(
      tf.to_float(tf.stack([image_height, image_width])) *
      max_padded_size_ratio)

  padded_image, padded_boxes = random_pad_image(
      cropped_image,
      cropped_boxes,
      min_image_size=min_image_size,
      max_image_size=max_image_size,
      pad_color=pad_color,
      seed=seed,
      preprocess_vars_cache=preprocess_vars_cache)

  cropped_padded_output = (padded_image, padded_boxes, cropped_labels)

  index = 3
  if label_weights is not None:
    cropped_label_weights = result[index]
    cropped_padded_output += (cropped_label_weights,)
    index += 1

  if label_confidences is not None:
    cropped_label_confidences = result[index]
    cropped_padded_output += (cropped_label_confidences,)
    index += 1

  if multiclass_scores is not None:
    cropped_multiclass_scores = result[index]
    cropped_padded_output += (cropped_multiclass_scores,)

  return cropped_padded_output

def random_crop_to_aspect_ratio(image,
                                boxes,
                                labels,
                                label_weights,
                                label_confidences=None,
                                multiclass_scores=None,
                                masks=None,
                                keypoints=None,
                                aspect_ratio=1.0,
                                overlap_thresh=0.3,
                                clip_boxes=True,
                                seed=None,
                                preprocess_vars_cache=None):
  """Randomly crops an image to the specified aspect ratio.

  Randomly crops a portion of the image such that the crop is of the
  specified aspect ratio, and the crop is as large as possible. If the
  specified aspect ratio is larger than the aspect ratio of the image, this
  op will randomly remove rows from the top and bottom of the image. If the
  specified aspect ratio is less than the aspect ratio of the image, this op
  will randomly remove cols from the left and right of the image. If the
  specified aspect ratio is the same as the aspect ratio of the image, this
  op will return the image.

  Args:
    image: rank 3 float32 tensor contains 1 image ->
           [height, width, channels] with pixel values varying
           between [0, 1].
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    label_weights: float32 tensor of shape [num_instances] representing the
      weight for each box.
    label_confidences: (optional) float32 tensor of shape [num_instances]
      representing the confidence for each box.
    multiclass_scores: (optional) float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for
      each class.
    masks: (optional) rank 3 float32 tensor with shape
           [num_instances, height, width] containing instance masks. The
           masks are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]. The keypoints are in y-x
               normalized coordinates.
    aspect_ratio: the aspect ratio of cropped image.
    overlap_thresh: minimum overlap thresh with new cropped
                    image to keep the box.
    clip_boxes: whether to clip the boxes to the cropped image.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same rank as input image.
    boxes: boxes which is the same rank as input boxes.
           Boxes are in normalized form.
    labels: new labels.

    If label_weights, masks, keypoints, or multiclass_scores is not None, the
    function also returns:
    label_weights: rank 1 float32 tensor with shape [num_instances].
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
           containing instance masks.
    keypoints: rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]
    multiclass_scores: rank 2 float32 tensor with shape
                       [num_instances, num_classes]

  Raises:
    ValueError: If image is not a 3D tensor.
  """
  if len(image.get_shape()) != 3:
    raise ValueError('Image should be 3D tensor')

  with tf.name_scope('RandomCropToAspectRatio', values=[image]):
    image_shape = tf.shape(image)
    orig_height = image_shape[0]
    orig_width = image_shape[1]
    orig_aspect_ratio = tf.to_float(orig_width) / tf.to_float(orig_height)
    new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)

    def target_height_fn():
      return tf.to_int32(
          tf.round(tf.to_float(orig_width) / new_aspect_ratio))

    target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio,
                            lambda: orig_height, target_height_fn)

    def target_width_fn():
      return tf.to_int32(
          tf.round(tf.to_float(orig_height) * new_aspect_ratio))

    target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio,
                           lambda: orig_width, target_width_fn)

    # either offset_height = 0 and offset_width is randomly chosen from
    # [0, orig_width - target_width), or else offset_width = 0 and
    # offset_height is randomly chosen from [0, orig_height - target_height)
    offset_height = _random_integer(0, orig_height - target_height + 1, seed)
    offset_width = _random_integer(0, orig_width - target_width + 1, seed)

    generator_func = lambda: (offset_height, offset_width)
    offset_height, offset_width = _get_or_create_preprocess_rand_vars(
        generator_func,
        preprocessor_cache.PreprocessorCache.CROP_TO_ASPECT_RATIO,
        preprocess_vars_cache)

    new_image = tf.image.crop_to_bounding_box(
        image, offset_height, offset_width, target_height, target_width)

    im_box = tf.stack([
        tf.to_float(offset_height) / tf.to_float(orig_height),
        tf.to_float(offset_width) / tf.to_float(orig_width),
        tf.to_float(offset_height + target_height) /
        tf.to_float(orig_height),
        tf.to_float(offset_width + target_width) / tf.to_float(orig_width)
    ])

    boxlist = box_list.BoxList(boxes)
    boxlist.add_field('labels', labels)

    boxlist.add_field('label_weights', label_weights)

    if label_confidences is not None:
      boxlist.add_field('label_confidences', label_confidences)

    if multiclass_scores is not None:
      boxlist.add_field('multiclass_scores', multiclass_scores)

    im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0))

    # remove boxes whose overlap with the image is less than overlap_thresh
    overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(
        boxlist, im_boxlist, overlap_thresh)

    # change the coordinate of the remaining boxes
    new_labels = overlapping_boxlist.get_field('labels')
    new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,
                                                       im_box)
    if clip_boxes:
      new_boxlist = box_list_ops.clip_to_window(
          new_boxlist, tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32))
    new_boxes = new_boxlist.get()

    result = [new_image, new_boxes, new_labels]

    new_label_weights = overlapping_boxlist.get_field('label_weights')
    result.append(new_label_weights)

    if label_confidences is not None:
      new_label_confidences = (
          overlapping_boxlist.get_field('label_confidences'))
      result.append(new_label_confidences)

    if multiclass_scores is not None:
      new_multiclass_scores = overlapping_boxlist.get_field(
          'multiclass_scores')
      result.append(new_multiclass_scores)

    if masks is not None:
      masks_inside_window = tf.gather(masks, keep_ids)
      masks_box_begin = tf.stack([0, offset_height, offset_width])
      masks_box_size = tf.stack([-1, target_height, target_width])
      new_masks = tf.slice(masks_inside_window, masks_box_begin,
                           masks_box_size)
      result.append(new_masks)

    if keypoints is not None:
      keypoints_inside_window = tf.gather(keypoints, keep_ids)
      new_keypoints = keypoint_ops.change_coordinate_frame(
          keypoints_inside_window, im_box)
      if clip_boxes:
        new_keypoints = keypoint_ops.prune_outside_window(
            new_keypoints, [0.0, 0.0, 1.0, 1.0])
      result.append(new_keypoints)

    return tuple(result)
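
# --- Illustrative usage sketch (not part of the original module) ---
# Worked example of the crop geometry above: a 400x600 image has aspect
# ratio 1.5 >= 1.0, so the height stays at 400 and the width is cropped to
# round(400 * 1.0) = 400; only offset_width is effectively random.
def _example_random_crop_to_aspect_ratio():
  image = tf.random_uniform([400, 600, 3])
  boxes = tf.constant([[0.1, 0.3, 0.9, 0.7]], dtype=tf.float32)
  labels = tf.constant([3], dtype=tf.int32)
  weights = tf.constant([1.0], dtype=tf.float32)
  # label_weights is always returned, so this is a 4-tuple.
  new_image, new_boxes, new_labels, new_weights = (
      random_crop_to_aspect_ratio(
          image, boxes, labels, weights, aspect_ratio=1.0, seed=2))
  return new_image, new_boxes, new_labels, new_weights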
""" if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('RandomPadToAspectRatio', values=[image]): image_shape = tf.shape(image) image_height = tf.to_float(image_shape[0]) image_width = tf.to_float(image_shape[1]) image_aspect_ratio = image_width / image_height new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) target_height = tf.cond( image_aspect_ratio <= new_aspect_ratio, lambda: image_height, lambda: image_width / new_aspect_ratio) target_width = tf.cond( image_aspect_ratio >= new_aspect_ratio, lambda: image_width, lambda: image_height * new_aspect_ratio) min_height = tf.maximum( min_padded_size_ratio[0] * image_height, target_height) min_width = tf.maximum( min_padded_size_ratio[1] * image_width, target_width) max_height = tf.maximum( max_padded_size_ratio[0] * image_height, target_height) max_width = tf.maximum( max_padded_size_ratio[1] * image_width, target_width) max_scale = tf.minimum(max_height / target_height, max_width / target_width) min_scale = tf.minimum( max_scale, tf.maximum(min_height / target_height, min_width / target_width)) generator_func = functools.partial(tf.random_uniform, [], min_scale, max_scale, seed=seed) scale = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.PAD_TO_ASPECT_RATIO, preprocess_vars_cache) target_height = tf.round(scale * target_height) target_width = tf.round(scale * target_width) new_image = tf.image.pad_to_bounding_box( image, 0, 0, tf.to_int32(target_height), tf.to_int32(target_width)) im_box = tf.stack([ 0.0, 0.0, target_height / image_height, target_width / image_width ]) boxlist = box_list.BoxList(boxes) new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box) new_boxes = new_boxlist.get() result = [new_image, new_boxes] if masks is not None: new_masks = tf.expand_dims(masks, -1) new_masks = tf.image.pad_to_bounding_box(new_masks, 0, 0, tf.to_int32(target_height), tf.to_int32(target_width)) new_masks = tf.squeeze(new_masks, [-1]) result.append(new_masks) if keypoints is not None: new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box) result.append(new_keypoints) return tuple(result) def random_black_patches(image, max_black_patches=10, probability=0.5, size_to_image_ratio=0.1, random_seed=None, preprocess_vars_cache=None): """Randomly adds some black patches to the image. This op adds up to max_black_patches square black patches of a fixed size to the image where size is specified via the size_to_image_ratio parameter. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. max_black_patches: number of times that the function tries to add a black box to the image. probability: at each try, what is the chance of adding a box. size_to_image_ratio: Determines the ratio of the size of the black patches to the size of the image. box_size = size_to_image_ratio * min(image_width, image_height) random_seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image """ def add_black_patch_to_image(image, idx): """Function for adding one patch to the image. 

def random_black_patches(image,
                         max_black_patches=10,
                         probability=0.5,
                         size_to_image_ratio=0.1,
                         random_seed=None,
                         preprocess_vars_cache=None):
  """Randomly adds some black patches to the image.

  This op adds up to max_black_patches square black patches of a fixed size
  to the image where size is specified via the size_to_image_ratio parameter.

  Args:
    image: rank 3 float32 tensor containing 1 image ->
           [height, width, channels] with pixel values varying
           between [0, 1].
    max_black_patches: number of times that the function tries to add a
                       black box to the image.
    probability: at each try, what is the chance of adding a box.
    size_to_image_ratio: Determines the ratio of the size of the black
                         patches to the size of the image.
                         box_size = size_to_image_ratio *
                                    min(image_width, image_height)
    random_seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image
  """
  def add_black_patch_to_image(image, idx):
    """Function for adding one patch to the image.

    Args:
      image: image
      idx: counter for number of patches that could have been added

    Returns:
      image with a randomly added black box
    """
    image_shape = tf.shape(image)
    image_height = image_shape[0]
    image_width = image_shape[1]
    box_size = tf.to_int32(
        tf.multiply(
            tf.minimum(tf.to_float(image_height), tf.to_float(image_width)),
            size_to_image_ratio))

    generator_func = functools.partial(tf.random_uniform, [], minval=0.0,
                                       maxval=(1.0 - size_to_image_ratio),
                                       seed=random_seed)
    normalized_y_min = _get_or_create_preprocess_rand_vars(
        generator_func,
        preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH,
        preprocess_vars_cache, key=str(idx) + 'y')
    normalized_x_min = _get_or_create_preprocess_rand_vars(
        generator_func,
        preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH,
        preprocess_vars_cache, key=str(idx) + 'x')

    y_min = tf.to_int32(normalized_y_min * tf.to_float(image_height))
    x_min = tf.to_int32(normalized_x_min * tf.to_float(image_width))
    black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32)
    mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min,
                                              image_height, image_width)
    image = tf.multiply(image, mask)
    return image

  with tf.name_scope('RandomBlackPatchInImage', values=[image]):
    for idx in range(max_black_patches):
      generator_func = functools.partial(tf.random_uniform, [],
                                         minval=0.0, maxval=1.0,
                                         dtype=tf.float32, seed=random_seed)
      random_prob = _get_or_create_preprocess_rand_vars(
          generator_func,
          preprocessor_cache.PreprocessorCache.BLACK_PATCHES,
          preprocess_vars_cache, key=idx)
      image = tf.cond(
          tf.greater(random_prob, probability), lambda: image,
          functools.partial(add_black_patch_to_image, image=image, idx=idx))
    return image


def image_to_float(image):
  """Used in Faster R-CNN. Casts image pixel values to float.

  Args:
    image: input image which might be in tf.uint8 or some other format.

  Returns:
    image: image in tf.float32 format.
  """
  with tf.name_scope('ImageToFloat', values=[image]):
    image = tf.to_float(image)
    return image


def random_resize_method(image, target_size, preprocess_vars_cache=None):
  """Uses a random resize method to resize the image to target size.

  Args:
    image: a rank 3 tensor.
    target_size: a list of [target_height, target_width]
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    resized image.
  """

  resized_image = _apply_with_random_selector(
      image,
      lambda x, method: tf.image.resize_images(x, target_size, method),
      num_cases=4,
      preprocess_vars_cache=preprocess_vars_cache,
      key=preprocessor_cache.PreprocessorCache.RESIZE_METHOD)

  return resized_image
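
# --- Illustrative usage sketch (not part of the original module) ---
# random_resize_method draws one of num_cases=4 interpolation methods
# (the integer cases 0..3 match the tf.image.ResizeMethod enum: BILINEAR,
# NEAREST_NEIGHBOR, BICUBIC, AREA) and, with a shared cache, keeps that
# choice fixed across calls.
def _example_random_resize_method():
  cache = preprocessor_cache.PreprocessorCache()
  image = tf.random_uniform([123, 77, 3])
  resized_a = random_resize_method(image, [224, 224],
                                   preprocess_vars_cache=cache)
  resized_b = random_resize_method(image, [224, 224],
                                   preprocess_vars_cache=cache)
  # resized_a and resized_b use the same randomly selected method.
  return resized_a, resized_b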
def _compute_new_static_size(image, min_dimension, max_dimension):
  """Compute new static shape for resize_to_range method."""
  image_shape = image.get_shape().as_list()
  orig_height = image_shape[0]
  orig_width = image_shape[1]
  num_channels = image_shape[2]
  orig_min_dim = min(orig_height, orig_width)
  # Calculates the larger of the possible sizes
  large_scale_factor = min_dimension / float(orig_min_dim)
  # Scaling orig_(height|width) by large_scale_factor will make the smaller
  # dimension equal to min_dimension, save for floating point rounding
  # errors. For reasonably-sized images, taking the nearest integer will
  # reliably eliminate this error.
  large_height = int(round(orig_height * large_scale_factor))
  large_width = int(round(orig_width * large_scale_factor))
  large_size = [large_height, large_width]
  if max_dimension:
    # Calculates the smaller of the possible sizes, use that if the larger
    # is too big.
    orig_max_dim = max(orig_height, orig_width)
    small_scale_factor = max_dimension / float(orig_max_dim)
    # Scaling orig_(height|width) by small_scale_factor will make the larger
    # dimension equal to max_dimension, save for floating point rounding
    # errors. For reasonably-sized images, taking the nearest integer will
    # reliably eliminate this error.
    small_height = int(round(orig_height * small_scale_factor))
    small_width = int(round(orig_width * small_scale_factor))
    small_size = [small_height, small_width]
    new_size = large_size
    if max(large_size) > max_dimension:
      new_size = small_size
  else:
    new_size = large_size
  return tf.constant(new_size + [num_channels])


def _compute_new_dynamic_size(image, min_dimension, max_dimension):
  """Compute new dynamic shape for resize_to_range method."""
  image_shape = tf.shape(image)
  orig_height = tf.to_float(image_shape[0])
  orig_width = tf.to_float(image_shape[1])
  num_channels = image_shape[2]
  orig_min_dim = tf.minimum(orig_height, orig_width)
  # Calculates the larger of the possible sizes
  min_dimension = tf.constant(min_dimension, dtype=tf.float32)
  large_scale_factor = min_dimension / orig_min_dim
  # Scaling orig_(height|width) by large_scale_factor will make the smaller
  # dimension equal to min_dimension, save for floating point rounding
  # errors. For reasonably-sized images, taking the nearest integer will
  # reliably eliminate this error.
  large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))
  large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))
  large_size = tf.stack([large_height, large_width])
  if max_dimension:
    # Calculates the smaller of the possible sizes, use that if the larger
    # is too big.
    orig_max_dim = tf.maximum(orig_height, orig_width)
    max_dimension = tf.constant(max_dimension, dtype=tf.float32)
    small_scale_factor = max_dimension / orig_max_dim
    # Scaling orig_(height|width) by small_scale_factor will make the larger
    # dimension equal to max_dimension, save for floating point rounding
    # errors. For reasonably-sized images, taking the nearest integer will
    # reliably eliminate this error.
    small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))
    small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))
    small_size = tf.stack([small_height, small_width])
    new_size = tf.cond(
        tf.to_float(tf.reduce_max(large_size)) > max_dimension,
        lambda: small_size, lambda: large_size)
  else:
    new_size = large_size
  return tf.stack(tf.unstack(new_size) + [num_channels])
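
# --- Illustrative usage sketch (not part of the original module) ---
# Worked example of the selection rule above, for min_dimension=600 and
# max_dimension=1024 on a 480x640 image:
#   large_scale_factor = 600 / 480 = 1.25 -> large_size = [600, 800];
#   max(large_size) = 800 <= 1024, so new_size = [600, 800].
# For a 480x1920 image, large_size would be [600, 2400]; since 2400 > 1024,
# the small path wins: small_scale_factor = 1024 / 1920 -> [256, 1024].
def _example_compute_new_static_size():
  image = tf.zeros([480, 640, 3])  # fully defined static shape
  return _compute_new_static_size(image, min_dimension=600,
                                  max_dimension=1024)  # -> [600, 800, 3]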

def resize_to_range(image,
                    masks=None,
                    min_dimension=None,
                    max_dimension=None,
                    method=tf.image.ResizeMethod.BILINEAR,
                    align_corners=False,
                    pad_to_max_dimension=False,
                    per_channel_pad_value=(0, 0, 0)):
  """Resizes an image so its dimensions are within the provided values.

  The output size can be described by two cases:
  1. If the image can be rescaled so its minimum dimension is equal to the
     provided value without the other dimension exceeding max_dimension,
     then do so.
  2. Otherwise, resize so the largest dimension is equal to max_dimension.

  Args:
    image: A 3D tensor of shape [height, width, channels]
    masks: (optional) rank 3 float32 tensor with shape
           [num_instances, height, width] containing instance masks.
    min_dimension: (optional) (scalar) desired size of the smaller image
                   dimension.
    max_dimension: (optional) (scalar) maximum allowed size
                   of the larger image dimension.
    method: (optional) interpolation method used in resizing. Defaults to
            BILINEAR.
    align_corners: bool. If true, exactly align all 4 corners of the input
                   and output. Defaults to False.
    pad_to_max_dimension: Whether to resize the image and pad it with zeros
      so the resulting image is of the spatial size
      [max_dimension, max_dimension]. If masks are included they are padded
      similarly.
    per_channel_pad_value: A tuple of per-channel scalar value to use for
      padding. By default pads zeros.

  Returns:
    Note that the position of the resized_image_shape changes based on
    whether masks are present.
    resized_image: A 3D tensor of shape [new_height, new_width, channels],
      where the image has been resized (with bilinear interpolation) so that
      min(new_height, new_width) == min_dimension or
      max(new_height, new_width) == max_dimension.
    resized_masks: If masks is not None, also outputs masks. A 3D tensor of
      shape [num_instances, new_height, new_width].
    resized_image_shape: A 1D tensor of shape [3] containing shape of the
                         resized image.

  Raises:
    ValueError: if the image is not a 3D tensor.
  """
  if len(image.get_shape()) != 3:
    raise ValueError('Image should be 3D tensor')

  with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
    if image.get_shape().is_fully_defined():
      new_size = _compute_new_static_size(image, min_dimension,
                                          max_dimension)
    else:
      new_size = _compute_new_dynamic_size(image, min_dimension,
                                           max_dimension)
    new_image = tf.image.resize_images(
        image, new_size[:-1], method=method, align_corners=align_corners)

    if pad_to_max_dimension:
      channels = tf.unstack(new_image, axis=2)
      if len(channels) != len(per_channel_pad_value):
        raise ValueError('Number of channels must be equal to the length of '
                         'per-channel pad value.')
      new_image = tf.stack(
          [
              tf.pad(
                  channels[i], [[0, max_dimension - new_size[0]],
                                [0, max_dimension - new_size[1]]],
                  constant_values=per_channel_pad_value[i])
              for i in range(len(channels))
          ],
          axis=2)
      new_image.set_shape([max_dimension, max_dimension, 3])

    result = [new_image]
    if masks is not None:
      new_masks = tf.expand_dims(masks, 3)
      new_masks = tf.image.resize_images(
          new_masks,
          new_size[:-1],
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=align_corners)
      if pad_to_max_dimension:
        new_masks = tf.image.pad_to_bounding_box(
            new_masks, 0, 0, max_dimension, max_dimension)
      new_masks = tf.squeeze(new_masks, 3)
      result.append(new_masks)

    result.append(new_size)
    return result
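
# --- Illustrative usage sketch (not part of the original module) ---
# Typical Faster R-CNN-style resize: shortest side to 600 unless that would
# push the longest side past 1024. Also shows the padded variant, which
# always yields a fixed 1024x1024 canvas. Values are arbitrary.
def _example_resize_to_range():
  image = tf.random_uniform([480, 640, 3])
  # Without masks the result is [resized_image, resized_image_shape].
  resized_image, resized_shape = resize_to_range(
      image, min_dimension=600, max_dimension=1024)
  padded_image, padded_shape = resize_to_range(
      image, min_dimension=600, max_dimension=1024,
      pad_to_max_dimension=True)
  return resized_image, resized_shape, padded_image, padded_shape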
""" if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]): image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] num_channels = tf.shape(image)[2] min_image_dimension = tf.minimum(image_height, image_width) min_target_dimension = tf.maximum(min_image_dimension, min_dimension) target_ratio = tf.to_float(min_target_dimension) / tf.to_float( min_image_dimension) target_height = tf.to_int32(tf.to_float(image_height) * target_ratio) target_width = tf.to_int32(tf.to_float(image_width) * target_ratio) image = tf.image.resize_bilinear( tf.expand_dims(image, axis=0), size=[target_height, target_width], align_corners=True) result = [tf.squeeze(image, axis=0)] if masks is not None: masks = tf.image.resize_nearest_neighbor( tf.expand_dims(masks, axis=3), size=[target_height, target_width], align_corners=True) result.append(tf.squeeze(masks, axis=3)) result.append(tf.stack([target_height, target_width, num_channels])) return result def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None): """Scales boxes from normalized to pixel coordinates. Args: image: A 3D float32 tensor of shape [height, width, channels]. boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in normalized coordinates. Each row is of the form [ymin, xmin, ymax, xmax]. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. Returns: image: unchanged input image. scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in pixel coordinates. scaled_keypoints: a 3D float32 tensor with shape [num_instances, num_keypoints, 2] containing the keypoints in pixel coordinates. """ boxlist = box_list.BoxList(boxes) image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get() result = [image, scaled_boxes] if keypoints is not None: scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width) result.append(scaled_keypoints) return tuple(result) # TODO(alirezafathi): Investigate if instead the function should return None if # masks is None. # pylint: disable=g-doc-return-or-yield def resize_image(image, masks=None, new_height=600, new_width=1024, method=tf.image.ResizeMethod.BILINEAR, align_corners=False): """Resizes images to the given height and width. Args: image: A 3D tensor of shape [height, width, channels] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. new_height: (optional) (scalar) desired height of the image. new_width: (optional) (scalar) desired width of the image. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. align_corners: bool. If true, exactly align all 4 corners of the input and output. Defaults to False. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A tensor of size [new_height, new_width, channels]. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width] resized_image_shape: A 1D tensor of shape [3] containing the shape of the resized image. 
""" with tf.name_scope( 'ResizeImage', values=[image, new_height, new_width, method, align_corners]): new_image = tf.image.resize_images( image, tf.stack([new_height, new_width]), method=method, align_corners=align_corners) image_shape = shape_utils.combined_static_and_dynamic_shape(image) result = [new_image] if masks is not None: num_instances = tf.shape(masks)[0] new_size = tf.stack([new_height, new_width]) def resize_masks_branch(): new_masks = tf.expand_dims(masks, 3) new_masks = tf.image.resize_nearest_neighbor( new_masks, new_size, align_corners=align_corners) new_masks = tf.squeeze(new_masks, axis=3) return new_masks def reshape_masks_branch(): # The shape function will be computed for both branches of the # condition, regardless of which branch is actually taken. Make sure # that we don't trigger an assertion in the shape function when trying # to reshape a non empty tensor into an empty one. new_masks = tf.reshape(masks, [-1, new_size[0], new_size[1]]) return new_masks masks = tf.cond(num_instances > 0, resize_masks_branch, reshape_masks_branch) result.append(masks) result.append(tf.stack([new_height, new_width, image_shape[2]])) return result def subtract_channel_mean(image, means=None): """Normalizes an image by subtracting a mean from each channel. Args: image: A 3D tensor of shape [height, width, channels] means: float list containing a mean for each channel Returns: normalized_images: a tensor of shape [height, width, channels] Raises: ValueError: if images is not a 4D tensor or if the number of means is not equal to the number of channels. """ with tf.name_scope('SubtractChannelMean', values=[image, means]): if len(image.get_shape()) != 3: raise ValueError('Input must be of size [height, width, channels]') if len(means) != image.get_shape()[-1]: raise ValueError('len(means) must match the number of channels') return image - [[means]] def one_hot_encoding(labels, num_classes=None): """One-hot encodes the multiclass labels. Example usage: labels = tf.constant([1, 4], dtype=tf.int32) one_hot = OneHotEncoding(labels, num_classes=5) one_hot.eval() # evaluates to [0, 1, 0, 0, 1] Args: labels: A tensor of shape [None] corresponding to the labels. num_classes: Number of classes in the dataset. Returns: onehot_labels: a tensor of shape [num_classes] corresponding to the one hot encoding of the labels. Raises: ValueError: if num_classes is not specified. """ with tf.name_scope('OneHotEncoding', values=[labels]): if num_classes is None: raise ValueError('num_classes must be specified') labels = tf.one_hot(labels, num_classes, 1, 0) return tf.reduce_max(labels, 0) def rgb_to_gray(image): """Converts a 3 channel RGB image to a 1 channel grayscale image. Args: image: Rank 3 float32 tensor containing 1 image -> [height, width, 3] with pixel values varying between [0, 1]. Returns: image: A single channel grayscale image -> [image, height, 1]. """ return _rgb_to_grayscale(image) def ssd_random_crop(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio_range=((0.5, 2.0),) * 7, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 7, seed=None, preprocess_vars_cache=None): """Random crop preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. 

def ssd_random_crop(image,
                    boxes,
                    labels,
                    label_weights,
                    label_confidences=None,
                    multiclass_scores=None,
                    masks=None,
                    keypoints=None,
                    min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
                    aspect_ratio_range=((0.5, 2.0),) * 7,
                    area_range=((0.1, 1.0),) * 7,
                    overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
                    clip_boxes=(True,) * 7,
                    random_coef=(0.15,) * 7,
                    seed=None,
                    preprocess_vars_cache=None):
  """Random crop preprocessing with default parameters as in SSD paper.

  Liu et al., SSD: Single shot multibox detector.
  For further information on random crop preprocessing refer to RandomCrop
  function above.

  Args:
    image: rank 3 float32 tensor contains 1 image ->
           [height, width, channels] with pixel values varying
           between [0, 1].
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    label_weights: rank 1 float32 tensor containing the weights.
    label_confidences: rank 1 float32 tensor containing the confidences.
    multiclass_scores: (optional) float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for
      each class.
    masks: (optional) rank 3 float32 tensor with shape
           [num_instances, height, width] containing instance masks. The
           masks are of the same height, width as the input `image`.
    keypoints: (optional) rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]. The keypoints are in y-x
               normalized coordinates.
    min_object_covered: the cropped image must cover at least this fraction
                        of at least one of the input bounding boxes.
    aspect_ratio_range: allowed range for aspect ratio of cropped image.
    area_range: allowed range for area ratio between cropped image and the
                original image.
    overlap_thresh: minimum overlap thresh with new cropped
                    image to keep the box.
    clip_boxes: whether to clip the boxes to the cropped image.
    random_coef: a random coefficient that defines the chance of getting the
                 original image. If random_coef is 0, we will always get the
                 cropped image, and if it is 1.0, we will always get the
                 original image.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: image which is the same rank as input image.
    boxes: boxes which is the same rank as input boxes.
           Boxes are in normalized form.
    labels: new labels.

    If label_weights, multiclass_scores, masks, or keypoints is not None, the
    function also returns:
    label_weights: rank 1 float32 tensor with shape [num_instances].
    multiclass_scores: rank 2 float32 tensor with shape
                       [num_instances, num_classes]
    masks: rank 3 float32 tensor with shape [num_instances, height, width]
           containing instance masks.
    keypoints: rank 3 float32 tensor with shape
               [num_instances, num_keypoints, 2]
  """

  def random_crop_selector(selected_result, index):
    """Applies random_crop_image to selected result.

    Args:
      selected_result: A tuple containing image, boxes, labels, keypoints
        (if not None), and masks (if not None).
      index: The index that was randomly selected.

    Returns:
      A tuple containing image, boxes, labels, keypoints (if not None), and
      masks (if not None).
    """

    i = 3
    image, boxes, labels = selected_result[:i]
    selected_label_weights = None
    selected_label_confidences = None
    selected_multiclass_scores = None
    selected_masks = None
    selected_keypoints = None
    if label_weights is not None:
      selected_label_weights = selected_result[i]
      i += 1
    if label_confidences is not None:
      selected_label_confidences = selected_result[i]
      i += 1
    if multiclass_scores is not None:
      selected_multiclass_scores = selected_result[i]
      i += 1
    if masks is not None:
      selected_masks = selected_result[i]
      i += 1
    if keypoints is not None:
      selected_keypoints = selected_result[i]

    return random_crop_image(
        image=image,
        boxes=boxes,
        labels=labels,
        label_weights=selected_label_weights,
        label_confidences=selected_label_confidences,
        multiclass_scores=selected_multiclass_scores,
        masks=selected_masks,
        keypoints=selected_keypoints,
        min_object_covered=min_object_covered[index],
        aspect_ratio_range=aspect_ratio_range[index],
        area_range=area_range[index],
        overlap_thresh=overlap_thresh[index],
        clip_boxes=clip_boxes[index],
        random_coef=random_coef[index],
        seed=seed,
        preprocess_vars_cache=preprocess_vars_cache)

  result = _apply_with_random_selector_tuples(
      tuple(
          t for t in (image, boxes, labels, label_weights,
                      label_confidences, multiclass_scores, masks, keypoints)
          if t is not None),
      random_crop_selector,
      num_cases=len(min_object_covered),
      preprocess_vars_cache=preprocess_vars_cache,
      key=preprocessor_cache.PreprocessorCache.SSD_CROP_SELECTOR_ID)
  return result

def ssd_random_crop_pad(image,
                        boxes,
                        labels,
                        label_weights,
                        label_confidences=None,
                        multiclass_scores=None,
                        min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
                        aspect_ratio_range=((0.5, 2.0),) * 6,
                        area_range=((0.1, 1.0),) * 6,
                        overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),
                        clip_boxes=(True,) * 6,
                        random_coef=(0.15,) * 6,
                        min_padded_size_ratio=((1.0, 1.0),) * 6,
                        max_padded_size_ratio=((2.0, 2.0),) * 6,
                        pad_color=(None,) * 6,
                        seed=None,
                        preprocess_vars_cache=None):
  """Random crop preprocessing with default parameters as in SSD paper.

  Liu et al., SSD: Single shot multibox detector.
  For further information on random crop preprocessing refer to RandomCrop
  function above.

  Args:
    image: rank 3 float32 tensor containing 1 image ->
           [height, width, channels] with pixel values varying
           between [0, 1].
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].
    labels: rank 1 int32 tensor containing the object classes.
    label_weights: float32 tensor of shape [num_instances] representing the
      weight for each box.
    label_confidences: float32 tensor of shape [num_instances] representing
      the confidences for each box.
    multiclass_scores: (optional) float32 tensor of shape
      [num_instances, num_classes] representing the score for each box for
      each class.
    min_object_covered: the cropped image must cover at least this fraction
                        of at least one of the input bounding boxes.
    aspect_ratio_range: allowed range for aspect ratio of cropped image.
    area_range: allowed range for area ratio between cropped image and the
                original image.
    overlap_thresh: minimum overlap thresh with new cropped
                    image to keep the box.
    clip_boxes: whether to clip the boxes to the cropped image.
    random_coef: a random coefficient that defines the chance of getting the
                 original image. If random_coef is 0, we will always get the
                 cropped image, and if it is 1.0, we will always get the
                 original image.
    min_padded_size_ratio: min ratio of padded image height and width to the
                           input image's height and width.
    max_padded_size_ratio: max ratio of padded image height and width to the
                           input image's height and width.
    pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.
               If set as None, it will be set to average color of the
               randomly cropped image.
    seed: random seed.
    preprocess_vars_cache: PreprocessorCache object that records previously
                           performed augmentations. Updated in-place. If this
                           function is called multiple times with the same
                           non-null cache, it will perform deterministically.

  Returns:
    image: Image shape will be [new_height, new_width, channels].
    boxes: boxes which is the same rank as input boxes. Boxes are in
           normalized form.
    new_labels: new labels.
    new_label_weights: new label weights.
  """

  def random_crop_pad_selector(image_boxes_labels, index):
    """Random crop preprocessing helper."""
    i = 3
    image, boxes, labels = image_boxes_labels[:i]
    selected_label_weights = None
    selected_label_confidences = None
    selected_multiclass_scores = None
    if label_weights is not None:
      selected_label_weights = image_boxes_labels[i]
      i += 1
    if label_confidences is not None:
      selected_label_confidences = image_boxes_labels[i]
      i += 1
    if multiclass_scores is not None:
      selected_multiclass_scores = image_boxes_labels[i]

    return random_crop_pad_image(
        image,
        boxes,
        labels,
        label_weights=selected_label_weights,
        label_confidences=selected_label_confidences,
        multiclass_scores=selected_multiclass_scores,
        min_object_covered=min_object_covered[index],
        aspect_ratio_range=aspect_ratio_range[index],
        area_range=area_range[index],
        overlap_thresh=overlap_thresh[index],
        clip_boxes=clip_boxes[index],
        random_coef=random_coef[index],
        min_padded_size_ratio=min_padded_size_ratio[index],
        max_padded_size_ratio=max_padded_size_ratio[index],
        pad_color=pad_color[index],
        seed=seed,
        preprocess_vars_cache=preprocess_vars_cache)

  return _apply_with_random_selector_tuples(
      tuple(t for t in (image, boxes, labels, label_weights,
                        label_confidences, multiclass_scores)
            if t is not None),
      random_crop_pad_selector,
      num_cases=len(min_object_covered),
      preprocess_vars_cache=preprocess_vars_cache,
      key=preprocessor_cache.PreprocessorCache.SSD_CROP_PAD_SELECTOR_ID)
multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio: aspect ratio of the cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If mulitclass_scores, masks, or keypoints is not None, the function also returns: multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range) crop_result = ssd_random_crop( image, boxes, labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, keypoints=keypoints, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) i = 3 new_image, new_boxes, new_labels = crop_result[:i] new_label_weights = None new_label_confidences = None new_multiclass_scores = None new_masks = None new_keypoints = None if label_weights is not None: new_label_weights = crop_result[i] i += 1 if label_confidences is not None: new_label_confidences = crop_result[i] i += 1 if multiclass_scores is not None: new_multiclass_scores = crop_result[i] i += 1 if masks is not None: new_masks = crop_result[i] i += 1 if keypoints is not None: new_keypoints = crop_result[i] result = random_crop_to_aspect_ratio( new_image, new_boxes, new_labels, label_weights=new_label_weights, label_confidences=new_label_confidences, multiclass_scores=new_multiclass_scores, masks=new_masks, keypoints=new_keypoints, aspect_ratio=aspect_ratio, clip_boxes=clip_boxes, seed=seed, preprocess_vars_cache=preprocess_vars_cache) return result def ssd_random_crop_pad_fixed_aspect_ratio( image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio=1.0, aspect_ratio_range=((0.5, 2.0),) * 7, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 
7, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), seed=None, preprocess_vars_cache=None): """Random crop and pad preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. The only difference is that after the initial crop, images are zero-padded to a fixed aspect ratio instead of being resized to that aspect ratio. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio: the final aspect ratio to pad to. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If multiclass_scores, masks, or keypoints is not None, the function also returns: multiclass_scores: rank 2 with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. 
keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ crop_result = ssd_random_crop( image, boxes, labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, keypoints=keypoints, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) i = 3 new_image, new_boxes, new_labels = crop_result[:i] new_label_weights = None new_label_confidences = None new_multiclass_scores = None new_masks = None new_keypoints = None if label_weights is not None: new_label_weights = crop_result[i] i += 1 if label_confidences is not None: new_label_confidences = crop_result[i] i += 1 if multiclass_scores is not None: new_multiclass_scores = crop_result[i] i += 1 if masks is not None: new_masks = crop_result[i] i += 1 if keypoints is not None: new_keypoints = crop_result[i] result = random_pad_to_aspect_ratio( new_image, new_boxes, masks=new_masks, keypoints=new_keypoints, aspect_ratio=aspect_ratio, min_padded_size_ratio=min_padded_size_ratio, max_padded_size_ratio=max_padded_size_ratio, seed=seed, preprocess_vars_cache=preprocess_vars_cache) result = list(result) i = 3 result.insert(2, new_labels) if new_label_weights is not None: result.insert(i, new_label_weights) i += 1 if new_label_confidences is not None: result.insert(i, new_label_confidences) i += 1 if multiclass_scores is not None: result.insert(i, new_multiclass_scores) result = tuple(result) return result def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0): """Converts multiclass logits to softmax scores after applying temperature. Args: multiclass_scores: float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. temperature: Scale factor to use prior to applying softmax. Larger temperatures give more uniform distruibutions after softmax. Returns: multiclass_scores: float32 tensor of shape [num_instances, num_classes] with scaling and softmax applied. """ # Multiclass scores must be stored as logits. Apply temp and softmax. multiclass_scores_scaled = tf.divide( multiclass_scores, temperature, name='scale_logits') multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax') return multiclass_scores def get_default_func_arg_map(include_label_weights=True, include_label_confidences=False, include_multiclass_scores=False, include_instance_masks=False, include_keypoints=False): """Returns the default mapping from a preprocessor function to its args. Args: include_label_weights: If True, preprocessing functions will modify the label weights, too. include_label_confidences: If True, preprocessing functions will modify the label confidences, too. include_multiclass_scores: If True, preprocessing functions will modify the multiclass scores, too. include_instance_masks: If True, preprocessing functions will modify the instance masks, too. include_keypoints: If True, preprocessing functions will modify the keypoints, too. Returns: A map from preprocessing functions to the arguments they receive. 
""" groundtruth_label_weights = None if include_label_weights: groundtruth_label_weights = ( fields.InputDataFields.groundtruth_weights) groundtruth_label_confidences = None if include_label_confidences: groundtruth_label_confidences = ( fields.InputDataFields.groundtruth_confidences) multiclass_scores = None if include_multiclass_scores: multiclass_scores = (fields.InputDataFields.multiclass_scores) groundtruth_instance_masks = None if include_instance_masks: groundtruth_instance_masks = ( fields.InputDataFields.groundtruth_instance_masks) groundtruth_keypoints = None if include_keypoints: groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints prep_func_arg_map = { normalize_image: (fields.InputDataFields.image,), random_horizontal_flip: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_vertical_flip: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_rotation90: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_pixel_value_scale: (fields.InputDataFields.image,), random_image_scale: ( fields.InputDataFields.image, groundtruth_instance_masks, ), random_rgb_to_gray: (fields.InputDataFields.image,), random_adjust_brightness: (fields.InputDataFields.image,), random_adjust_contrast: (fields.InputDataFields.image,), random_adjust_hue: (fields.InputDataFields.image,), random_adjust_saturation: (fields.InputDataFields.image,), random_distort_color: (fields.InputDataFields.image,), random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,), random_crop_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints), random_pad_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes), random_crop_pad_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores), random_crop_to_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), random_pad_to_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_black_patches: (fields.InputDataFields.image,), retain_boxes_above_threshold: ( fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), image_to_float: (fields.InputDataFields.image,), random_resize_method: (fields.InputDataFields.image,), resize_to_range: ( fields.InputDataFields.image, groundtruth_instance_masks, ), resize_to_min_dimension: ( fields.InputDataFields.image, groundtruth_instance_masks, ), scale_boxes_to_pixel_coordinates: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_keypoints, ), resize_image: ( fields.InputDataFields.image, groundtruth_instance_masks, ), subtract_channel_mean: 
(fields.InputDataFields.image,), one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,), rgb_to_gray: (fields.InputDataFields.image,), ssd_random_crop: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints), ssd_random_crop_pad: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores), ssd_random_crop_fixed_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints), ssd_random_crop_pad_fixed_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), convert_class_logits_to_softmax: (multiclass_scores,), } return prep_func_arg_map def preprocess(tensor_dict, preprocess_options, func_arg_map=None, preprocess_vars_cache=None): """Preprocess images and bounding boxes. Various types of preprocessing (to be implemented) based on the preprocess_options dictionary e.g. "crop image" (affects image and possibly boxes), "white balance image" (affects only image), etc. If self._options is None, no preprocessing is done. Args: tensor_dict: dictionary that contains images, boxes, and can contain other things as well. images-> rank 4 float32 tensor contains 1 image -> [1, height, width, 3]. with pixel values varying between [0, 1] boxes-> rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. preprocess_options: It is a list of tuples, where each tuple contains a function and a dictionary that contains arguments and their values. func_arg_map: mapping from preprocessing functions to arguments that they expect to receive and return. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: tensor_dict: which contains the preprocessed images, bounding boxes, etc. Raises: ValueError: (a) If the functions passed to Preprocess are not in func_arg_map. (b) If the arguments that a function needs do not exist in tensor_dict. 
(c) If image in tensor_dict is not rank 4 """ if func_arg_map is None: func_arg_map = get_default_func_arg_map() # changes the images to image (rank 4 to rank 3) since the functions # receive rank 3 tensor for image if fields.InputDataFields.image in tensor_dict: images = tensor_dict[fields.InputDataFields.image] if len(images.get_shape()) != 4: raise ValueError('images in tensor_dict should be rank 4') image = tf.squeeze(images, axis=0) tensor_dict[fields.InputDataFields.image] = image # Preprocess inputs based on preprocess_options for option in preprocess_options: func, params = option if func not in func_arg_map: raise ValueError('The function %s does not exist in func_arg_map' % (func.__name__)) arg_names = func_arg_map[func] for a in arg_names: if a is not None and a not in tensor_dict: raise ValueError('The function %s requires argument %s' % (func.__name__, a)) def get_arg(key): return tensor_dict[key] if key is not None else None args = [get_arg(a) for a in arg_names] if (preprocess_vars_cache is not None and 'preprocess_vars_cache' in inspect.getargspec(func).args): params['preprocess_vars_cache'] = preprocess_vars_cache results = func(*args, **params) if not isinstance(results, (list, tuple)): results = (results,) # Removes None args since the return values will not contain those. arg_names = [arg_name for arg_name in arg_names if arg_name is not None] for res, arg_name in zip(results, arg_names): tensor_dict[arg_name] = res # changes the image to images (rank 3 to rank 4) to be compatible to what # we received in the first place if fields.InputDataFields.image in tensor_dict: image = tensor_dict[fields.InputDataFields.image] images = tf.expand_dims(image, 0) tensor_dict[fields.InputDataFields.image] = images return tensor_dict
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/preprocessor.py
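# A minimal usage sketch for preprocessor.preprocess above, assuming the
# object_detection package is importable; all tensor contents here are
# illustrative, not taken from the original file.
import tensorflow as tf

from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields

tensor_dict = {
    # preprocess expects a rank 4 image batch of size 1 with values in [0, 1].
    fields.InputDataFields.image: tf.random_uniform([1, 300, 300, 3]),
    # Normalized [ymin, xmin, ymax, xmax] boxes.
    fields.InputDataFields.groundtruth_boxes:
        tf.constant([[0.1, 0.1, 0.8, 0.9]]),
    fields.InputDataFields.groundtruth_classes: tf.constant([1]),
    fields.InputDataFields.groundtruth_weights: tf.constant([1.0]),
}
preprocess_options = [
    (preprocessor.random_horizontal_flip, {}),
    (preprocessor.random_adjust_brightness, {'max_delta': 0.2}),
]
augmented_dict = preprocessor.preprocess(
    tensor_dict, preprocess_options,
    func_arg_map=preprocessor.get_default_func_arg_map())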
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface for data parsers.

A data parser parses input data and returns a dictionary of numpy arrays
keyed by the entries in standard_fields.py. Since the parser parses records
to numpy arrays (materialized tensors) directly, it is used to read data for
evaluation/visualization; to parse the data during training, DataDecoder
should be used.
"""
from abc import ABCMeta
from abc import abstractmethod


class DataToNumpyParser(object):
  __metaclass__ = ABCMeta

  @abstractmethod
  def parse(self, input_data):
    """Parses input and returns a numpy array or a dictionary of numpy arrays.

    Args:
      input_data: input data to be parsed.

    Returns:
      A numpy array, a dictionary of numpy arrays, or None if the input
      cannot be parsed.
    """
    pass
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/data_parser.py
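# A minimal concrete parser sketched against the interface above; the class
# name and field name are hypothetical, for illustration only.
import numpy as np

from object_detection.core.data_parser import DataToNumpyParser


class FloatFeatureParser(DataToNumpyParser):
  """Parses a float_list feature of a tf.Example into a numpy array."""

  def __init__(self, field_name):
    self.field_name = field_name

  def parse(self, tf_example):
    if self.field_name not in tf_example.features.feature:
      return None  # Input cannot be parsed; mirror the documented contract.
    return np.array(
        tf_example.features.feature[self.field_name].float_list.value,
        dtype=np.float32)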
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides functions to prefetch tensors to feed into models."""
import tensorflow as tf


def prefetch(tensor_dict, capacity):
  """Creates a prefetch queue for tensors.

  Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a
  dequeue op that evaluates to a tensor_dict. This function is useful in
  prefetching preprocessed tensors so that the data is readily available for
  consumers.

  Example input pipeline when you don't need batching:
  ----------------------------------------------------
  key, string_tensor = slim.parallel_reader.parallel_read(...)
  tensor_dict = decoder.decode(string_tensor)
  tensor_dict = preprocessor.preprocess(tensor_dict, ...)
  prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
  tensor_dict = prefetch_queue.dequeue()
  outputs = Model(tensor_dict)
  ...
  ----------------------------------------------------

  For input pipelines with batching, refer to core/batcher.py

  Args:
    tensor_dict: a dictionary of tensors to prefetch.
    capacity: the size of the prefetch queue.

  Returns:
    a FIFO prefetcher queue
  """
  names = list(tensor_dict.keys())
  dtypes = [t.dtype for t in tensor_dict.values()]
  shapes = [t.get_shape() for t in tensor_dict.values()]
  prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes,
                                       shapes=shapes,
                                       names=names,
                                       name='prefetch_queue')
  enqueue_op = prefetch_queue.enqueue(tensor_dict)
  tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
      prefetch_queue, [enqueue_op]))
  tf.summary.scalar('queue/%s/fraction_of_%d_full' % (prefetch_queue.name,
                                                      capacity),
                    tf.to_float(prefetch_queue.size()) * (1. / capacity))
  return prefetch_queue
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/prefetcher.py
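# A short usage sketch for prefetcher.prefetch, assuming a TF1-style session;
# the queue runner that prefetch registers must be started before dequeuing.
import tensorflow as tf

from object_detection.core import prefetcher

tensor_dict = {'image': tf.random_uniform([300, 300, 3])}
prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
dequeued_dict = prefetch_queue.dequeue()

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  image_np = sess.run(dequeued_dict)['image']  # numpy array, shape (300, 300, 3)
  coord.request_stop()
  coord.join(threads)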
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Bounding Box List operations. Example box operations that are supported: * areas: compute bounding box areas * iou: pairwise intersection-over-union scores * sq_dist: pairwise distances between bounding boxes Whenever box_list_ops functions output a BoxList, the fields of the incoming BoxList are retained unless documented otherwise. """ import tensorflow as tf from object_detection.core import box_list from object_detection.utils import ops from object_detection.utils import shape_utils class SortOrder(object): """Enum class for sort order. Attributes: ascend: ascend order. descend: descend order. """ ascend = 1 descend = 2 def area(boxlist, scope=None): """Computes area of boxes. Args: boxlist: BoxList holding N boxes scope: name scope. Returns: a tensor with shape [N] representing box areas. """ with tf.name_scope(scope, 'Area'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) return tf.squeeze((y_max - y_min) * (x_max - x_min), [1]) def height_width(boxlist, scope=None): """Computes height and width of boxes in boxlist. Args: boxlist: BoxList holding N boxes scope: name scope. Returns: Height: A tensor with shape [N] representing box heights. Width: A tensor with shape [N] representing box widths. """ with tf.name_scope(scope, 'HeightWidth'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1]) def scale(boxlist, y_scale, x_scale, scope=None): """scale box coordinates in x and y dimensions. Args: boxlist: BoxList holding N boxes y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: boxlist: BoxList holding N boxes """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) y_min = y_scale * y_min y_max = y_scale * y_max x_min = x_scale * x_min x_max = x_scale * x_max scaled_boxlist = box_list.BoxList( tf.concat([y_min, x_min, y_max, x_max], 1)) return _copy_extra_fields(scaled_boxlist, boxlist) def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None): """Clip bounding boxes to a window. This op clips any input bounding boxes (represented by bounding box corners) to a window, optionally filtering out boxes that do not overlap at all with the window. Args: boxlist: BoxList holding M_in boxes window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window to which the op should clip boxes. filter_nonoverlapping: whether to filter out boxes that do not overlap at all with the window. scope: name scope. 
Returns: a BoxList holding M_out boxes where M_out <= M_in """ with tf.name_scope(scope, 'ClipToWindow'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min) y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min) x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min) x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min) clipped = box_list.BoxList( tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped], 1)) clipped = _copy_extra_fields(clipped, boxlist) if filter_nonoverlapping: areas = area(clipped) nonzero_area_indices = tf.cast( tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32) clipped = gather(clipped, nonzero_area_indices) return clipped def prune_outside_window(boxlist, window, scope=None): """Prunes bounding boxes that fall outside a given window. This function prunes bounding boxes that even partially fall outside the given window. See also clip_to_window which only prunes bounding boxes that fall completely outside the window, and clips any bounding boxes that partially overflow. Args: boxlist: a BoxList holding M_in boxes. window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] of the window scope: name scope. Returns: pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes in the input tensor. """ with tf.name_scope(scope, 'PruneOutsideWindow'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) coordinate_violations = tf.concat([ tf.less(y_min, win_y_min), tf.less(x_min, win_x_min), tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max) ], 1) valid_indices = tf.reshape( tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) return gather(boxlist, valid_indices), valid_indices def prune_completely_outside_window(boxlist, window, scope=None): """Prunes bounding boxes that fall completely outside of the given window. The function clip_to_window prunes bounding boxes that fall completely outside the window, but also clips any bounding boxes that partially overflow. This function does not clip partially overflowing boxes. Args: boxlist: a BoxList holding M_in boxes. window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax] of the window scope: name scope. Returns: pruned_boxlist: a new BoxList with all bounding boxes partially or fully in the window. valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes in the input tensor. """ with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'): y_min, x_min, y_max, x_max = tf.split( value=boxlist.get(), num_or_size_splits=4, axis=1) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) coordinate_violations = tf.concat([ tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max), tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min) ], 1) valid_indices = tf.reshape( tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1]) return gather(boxlist, valid_indices), valid_indices def intersection(boxlist1, boxlist2, scope=None): """Compute pairwise intersection areas between boxes. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. 
Returns: a tensor with shape [N, M] representing pairwise intersections """ with tf.name_scope(scope, 'Intersection'): y_min1, x_min1, y_max1, x_max1 = tf.split( value=boxlist1.get(), num_or_size_splits=4, axis=1) y_min2, x_min2, y_max2, x_max2 = tf.split( value=boxlist2.get(), num_or_size_splits=4, axis=1) all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2)) all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2)) intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin) all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2)) all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2)) intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin) return intersect_heights * intersect_widths def matched_intersection(boxlist1, boxlist2, scope=None): """Compute intersection areas between corresponding boxes in two boxlists. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding N boxes scope: name scope. Returns: a tensor with shape [N] representing pairwise intersections """ with tf.name_scope(scope, 'MatchedIntersection'): y_min1, x_min1, y_max1, x_max1 = tf.split( value=boxlist1.get(), num_or_size_splits=4, axis=1) y_min2, x_min2, y_max2, x_max2 = tf.split( value=boxlist2.get(), num_or_size_splits=4, axis=1) min_ymax = tf.minimum(y_max1, y_max2) max_ymin = tf.maximum(y_min1, y_min2) intersect_heights = tf.maximum(0.0, min_ymax - max_ymin) min_xmax = tf.minimum(x_max1, x_max2) max_xmin = tf.maximum(x_min1, x_min2) intersect_widths = tf.maximum(0.0, min_xmax - max_xmin) return tf.reshape(intersect_heights * intersect_widths, [-1]) def iou(boxlist1, boxlist2, scope=None): """Computes pairwise intersection-over-union between box collections. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise iou scores. """ with tf.name_scope(scope, 'IOU'): intersections = intersection(boxlist1, boxlist2) areas1 = area(boxlist1) areas2 = area(boxlist2) unions = ( tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) return tf.where( tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions)) def matched_iou(boxlist1, boxlist2, scope=None): """Compute intersection-over-union between corresponding boxes in boxlists. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding N boxes scope: name scope. Returns: a tensor with shape [N] representing pairwise iou scores. """ with tf.name_scope(scope, 'MatchedIOU'): intersections = matched_intersection(boxlist1, boxlist2) areas1 = area(boxlist1) areas2 = area(boxlist2) unions = areas1 + areas2 - intersections return tf.where( tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions)) def ioa(boxlist1, boxlist2, scope=None): """Computes pairwise intersection-over-area between box collections. intersection-over-area (IOA) between two boxes box1 and box2 is defined as their intersection area over box2's area. Note that ioa is not symmetric, that is, ioa(box1, box2) != ioa(box2, box1). Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. Returns: a tensor with shape [N, M] representing pairwise ioa scores. 
""" with tf.name_scope(scope, 'IOA'): intersections = intersection(boxlist1, boxlist2) areas = tf.expand_dims(area(boxlist2), 0) return tf.truediv(intersections, areas) def prune_non_overlapping_boxes( boxlist1, boxlist2, min_overlap=0.0, scope=None): """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2. For each box in boxlist1, we want its IOA to be more than minoverlap with at least one of the boxes in boxlist2. If it does not, we remove it. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. min_overlap: Minimum required overlap between boxes, to count them as overlapping. scope: name scope. Returns: new_boxlist1: A pruned boxlist with size [N', 4]. keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the first input BoxList `boxlist1`. """ with tf.name_scope(scope, 'PruneNonOverlappingBoxes'): ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap)) keep_inds = tf.squeeze(tf.where(keep_bool), squeeze_dims=[1]) new_boxlist1 = gather(boxlist1, keep_inds) return new_boxlist1, keep_inds def prune_small_boxes(boxlist, min_side, scope=None): """Prunes small boxes in the boxlist which have a side smaller than min_side. Args: boxlist: BoxList holding N boxes. min_side: Minimum width AND height of box to survive pruning. scope: name scope. Returns: A pruned boxlist. """ with tf.name_scope(scope, 'PruneSmallBoxes'): height, width = height_width(boxlist) is_valid = tf.logical_and(tf.greater_equal(width, min_side), tf.greater_equal(height, min_side)) return gather(boxlist, tf.reshape(tf.where(is_valid), [-1])) def change_coordinate_frame(boxlist, window, scope=None): """Change coordinate frame of the boxlist to be relative to window's frame. Given a window of the form [ymin, xmin, ymax, xmax], changes bounding box coordinates from boxlist to be relative to this window (e.g., the min corner maps to (0,0) and the max corner maps to (1,1)). An example use case is data augmentation: where we are given groundtruth boxes (boxlist) and would like to randomly crop the image to some window (window). In this case we need to change the coordinate frame of each groundtruth box to be relative to this new window. Args: boxlist: A BoxList object holding N boxes. window: A rank 1 tensor [4]. scope: name scope. Returns: Returns a BoxList object with N boxes. """ with tf.name_scope(scope, 'ChangeCoordinateFrame'): win_height = window[2] - window[0] win_width = window[3] - window[1] boxlist_new = scale(box_list.BoxList( boxlist.get() - [window[0], window[1], window[0], window[1]]), 1.0 / win_height, 1.0 / win_width) boxlist_new = _copy_extra_fields(boxlist_new, boxlist) return boxlist_new def sq_dist(boxlist1, boxlist2, scope=None): """Computes the pairwise squared distances between box corners. This op treats each box as if it were a point in a 4d Euclidean space and computes pairwise squared distances. Mathematically, we are given two matrices of box coordinates X and Y, where X(i,:) is the i'th row of X, containing the 4 numbers defining the corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to boxlist2. We compute Z(i,j) = ||X(i,:) - Y(j,:)||^2 = ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:), Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes scope: name scope. 
Returns: a tensor with shape [N, M] representing pairwise distances """ with tf.name_scope(scope, 'SqDist'): sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True) sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True) innerprod = tf.matmul(boxlist1.get(), boxlist2.get(), transpose_a=False, transpose_b=True) return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod def boolean_mask(boxlist, indicator, fields=None, scope=None, use_static_shapes=False, indicator_sum=None): """Select boxes from BoxList according to indicator and return new BoxList. `boolean_mask` returns the subset of boxes that are marked as "True" by the indicator tensor. By default, `boolean_mask` returns boxes corresponding to the input index list, as well as all additional fields stored in the boxlist (indexing into the first dimension). However one can optionally only draw from a subset of fields. Args: boxlist: BoxList holding N boxes indicator: a rank-1 boolean tensor fields: (optional) list of fields to also gather from. If None (default), all fields are gathered from. Pass an empty fields list to only gather the box coordinates. scope: name scope. use_static_shapes: Whether to use an implementation with static shape gurantees. indicator_sum: An integer containing the sum of `indicator` vector. Only required if `use_static_shape` is True. Returns: subboxlist: a BoxList corresponding to the subset of the input BoxList specified by indicator Raises: ValueError: if `indicator` is not a rank-1 boolean tensor. """ with tf.name_scope(scope, 'BooleanMask'): if indicator.shape.ndims != 1: raise ValueError('indicator should have rank 1') if indicator.dtype != tf.bool: raise ValueError('indicator should be a boolean tensor') if use_static_shapes: if not (indicator_sum and isinstance(indicator_sum, int)): raise ValueError('`indicator_sum` must be a of type int') selected_positions = tf.to_float(indicator) indexed_positions = tf.cast( tf.multiply( tf.cumsum(selected_positions), selected_positions), dtype=tf.int32) one_hot_selector = tf.one_hot( indexed_positions - 1, indicator_sum, dtype=tf.float32) sampled_indices = tf.cast( tf.tensordot( tf.to_float(tf.range(tf.shape(indicator)[0])), one_hot_selector, axes=[0, 0]), dtype=tf.int32) return gather(boxlist, sampled_indices, use_static_shapes=True) else: subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator)) if fields is None: fields = boxlist.get_extra_fields() for field in fields: if not boxlist.has_field(field): raise ValueError('boxlist must contain all specified fields') subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator) subboxlist.add_field(field, subfieldlist) return subboxlist def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False): """Gather boxes from BoxList according to indices and return new BoxList. By default, `gather` returns boxes corresponding to the input index list, as well as all additional fields stored in the boxlist (indexing into the first dimension). However one can optionally only gather from a subset of fields. Args: boxlist: BoxList holding N boxes indices: a rank-1 tensor of type int32 / int64 fields: (optional) list of fields to also gather from. If None (default), all fields are gathered from. Pass an empty fields list to only gather the box coordinates. scope: name scope. use_static_shapes: Whether to use an implementation with static shape gurantees. 
Returns: subboxlist: a BoxList corresponding to the subset of the input BoxList specified by indices Raises: ValueError: if specified field is not contained in boxlist or if the indices are not of type int32 """ with tf.name_scope(scope, 'Gather'): if len(indices.shape.as_list()) != 1: raise ValueError('indices should have rank 1') if indices.dtype != tf.int32 and indices.dtype != tf.int64: raise ValueError('indices should be an int32 / int64 tensor') gather_op = tf.gather if use_static_shapes: gather_op = ops.matmul_gather_on_zeroth_axis subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices)) if fields is None: fields = boxlist.get_extra_fields() fields += ['boxes'] for field in fields: if not boxlist.has_field(field): raise ValueError('boxlist must contain all specified fields') subfieldlist = gather_op(boxlist.get_field(field), indices) subboxlist.add_field(field, subfieldlist) return subboxlist def concatenate(boxlists, fields=None, scope=None): """Concatenate list of BoxLists. This op concatenates a list of input BoxLists into a larger BoxList. It also handles concatenation of BoxList fields as long as the field tensor shapes are equal except for the first dimension. Args: boxlists: list of BoxList objects fields: optional list of fields to also concatenate. By default, all fields from the first BoxList in the list are included in the concatenation. scope: name scope. Returns: a BoxList with number of boxes equal to sum([boxlist.num_boxes() for boxlist in BoxList]) Raises: ValueError: if boxlists is invalid (i.e., is not a list, is empty, or contains non BoxList objects), or if requested fields are not contained in all boxlists """ with tf.name_scope(scope, 'Concatenate'): if not isinstance(boxlists, list): raise ValueError('boxlists should be a list') if not boxlists: raise ValueError('boxlists should have nonzero length') for boxlist in boxlists: if not isinstance(boxlist, box_list.BoxList): raise ValueError('all elements of boxlists should be BoxList objects') concatenated = box_list.BoxList( tf.concat([boxlist.get() for boxlist in boxlists], 0)) if fields is None: fields = boxlists[0].get_extra_fields() for field in fields: first_field_shape = boxlists[0].get_field(field).get_shape().as_list() first_field_shape[0] = -1 if None in first_field_shape: raise ValueError('field %s must have fully defined shape except for the' ' 0th dimension.' % field) for boxlist in boxlists: if not boxlist.has_field(field): raise ValueError('boxlist must contain all requested fields') field_shape = boxlist.get_field(field).get_shape().as_list() field_shape[0] = -1 if field_shape != first_field_shape: raise ValueError('field %s must have same shape for all boxlists ' 'except for the 0th dimension.' % field) concatenated_field = tf.concat( [boxlist.get_field(field) for boxlist in boxlists], 0) concatenated.add_field(field, concatenated_field) return concatenated def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None): """Sort boxes and associated fields according to a scalar field. A common use case is reordering the boxes according to descending scores. Args: boxlist: BoxList holding N boxes. field: A BoxList field for sorting and reordering the BoxList. order: (Optional) descend or ascend. Default is descend. scope: name scope. Returns: sorted_boxlist: A sorted BoxList with the field in the specified order. 
Raises: ValueError: if specified field does not exist ValueError: if the order is not either descend or ascend """ with tf.name_scope(scope, 'SortByField'): if order != SortOrder.descend and order != SortOrder.ascend: raise ValueError('Invalid sort order') field_to_sort = boxlist.get_field(field) if len(field_to_sort.shape.as_list()) != 1: raise ValueError('Field should have rank 1') num_boxes = boxlist.num_boxes() num_entries = tf.size(field_to_sort) length_assert = tf.Assert( tf.equal(num_boxes, num_entries), ['Incorrect field size: actual vs expected.', num_entries, num_boxes]) with tf.control_dependencies([length_assert]): _, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True) if order == SortOrder.ascend: sorted_indices = tf.reverse_v2(sorted_indices, [0]) return gather(boxlist, sorted_indices) def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None): """Overlay bounding box list on image. Currently this visualization plots a 1 pixel thick red bounding box on top of the image. Note that tf.image.draw_bounding_boxes essentially is 1 indexed. Args: image: an image tensor with shape [height, width, 3] boxlist: a BoxList normalized: (boolean) specify whether corners are to be interpreted as absolute coordinates in image space or normalized with respect to the image size. scope: name scope. Returns: image_and_boxes: an image tensor with shape [height, width, 3] """ with tf.name_scope(scope, 'VisualizeBoxesInImage'): if not normalized: height, width, _ = tf.unstack(tf.shape(image)) boxlist = scale(boxlist, 1.0 / tf.cast(height, tf.float32), 1.0 / tf.cast(width, tf.float32)) corners = tf.expand_dims(boxlist.get(), 0) image = tf.expand_dims(image, 0) return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0]) def filter_field_value_equals(boxlist, field, value, scope=None): """Filter to keep only boxes with field entries equal to the given value. Args: boxlist: BoxList holding N boxes. field: field name for filtering. value: scalar value. scope: name scope. Returns: a BoxList holding M boxes where M <= N Raises: ValueError: if boxlist not a BoxList object or if it does not have the specified field. """ with tf.name_scope(scope, 'FilterFieldValueEquals'): if not isinstance(boxlist, box_list.BoxList): raise ValueError('boxlist must be a BoxList') if not boxlist.has_field(field): raise ValueError('boxlist must contain the specified field') filter_field = boxlist.get_field(field) gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1]) return gather(boxlist, gather_index) def filter_greater_than(boxlist, thresh, scope=None): """Filter to keep only boxes with score exceeding a given threshold. This op keeps the collection of boxes whose corresponding scores are greater than the input threshold. TODO(jonathanhuang): Change function name to filter_scores_greater_than Args: boxlist: BoxList holding N boxes. Must contain a 'scores' field representing detection scores. thresh: scalar threshold scope: name scope. 
Returns: a BoxList holding M boxes where M <= N Raises: ValueError: if boxlist not a BoxList object or if it does not have a scores field """ with tf.name_scope(scope, 'FilterGreaterThan'): if not isinstance(boxlist, box_list.BoxList): raise ValueError('boxlist must be a BoxList') if not boxlist.has_field('scores'): raise ValueError('input boxlist must have \'scores\' field') scores = boxlist.get_field('scores') if len(scores.shape.as_list()) > 2: raise ValueError('Scores should have rank 1 or 2') if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1: raise ValueError('Scores should have rank 1 or have shape ' 'consistent with [None, 1]') high_score_indices = tf.cast(tf.reshape( tf.where(tf.greater(scores, thresh)), [-1]), tf.int32) return gather(boxlist, high_score_indices) def non_max_suppression(boxlist, thresh, max_output_size, scope=None): """Non maximum suppression. This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. Note that this only works for a single class --- to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression. Args: boxlist: BoxList holding N boxes. Must contain a 'scores' field representing detection scores. thresh: scalar threshold max_output_size: maximum number of retained boxes scope: name scope. Returns: a BoxList holding M boxes where M <= max_output_size Raises: ValueError: if thresh is not in [0, 1] """ with tf.name_scope(scope, 'NonMaxSuppression'): if not 0 <= thresh <= 1.0: raise ValueError('thresh must be between 0 and 1') if not isinstance(boxlist, box_list.BoxList): raise ValueError('boxlist must be a BoxList') if not boxlist.has_field('scores'): raise ValueError('input boxlist must have \'scores\' field') with tf.device('/CPU:0'): selected_indices = tf.image.non_max_suppression( boxlist.get(), boxlist.get_field('scores'), max_output_size, iou_threshold=thresh) return gather(boxlist, selected_indices) def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from): """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to. Args: boxlist_to_copy_to: BoxList to which extra fields are copied. boxlist_to_copy_from: BoxList from which fields are copied. Returns: boxlist_to_copy_to with extra fields. """ for field in boxlist_to_copy_from.get_extra_fields(): boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field)) return boxlist_to_copy_to def to_normalized_coordinates(boxlist, height, width, check_range=True, scope=None): """Converts absolute box coordinates to normalized coordinates in [0, 1]. Usually one uses the dynamic shape of the image or conv-layer tensor: boxlist = box_list_ops.to_normalized_coordinates(boxlist, tf.shape(images)[1], tf.shape(images)[2]), This function raises an assertion failed error at graph execution time when the maximum coordinate is smaller than 1.01 (which means that coordinates are already normalized). The value 1.01 is to deal with small rounding errors. Args: boxlist: BoxList with coordinates in terms of pixel-locations. height: Maximum value for height of absolute box coordinates. width: Maximum value for width of absolute box coordinates. check_range: If True, checks if the coordinates are normalized or not. scope: name scope. Returns: boxlist with normalized coordinates in [0, 1]. 
""" with tf.name_scope(scope, 'ToNormalizedCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) if check_range: max_val = tf.reduce_max(boxlist.get()) max_assert = tf.Assert(tf.greater(max_val, 1.01), ['max value is lower than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(boxlist, 1 / height, 1 / width) def to_absolute_coordinates(boxlist, height, width, check_range=True, maximum_normalized_coordinate=1.1, scope=None): """Converts normalized box coordinates to absolute pixel coordinates. This function raises an assertion failed error when the maximum box coordinate value is larger than maximum_normalized_coordinate (in which case coordinates are already absolute). Args: boxlist: BoxList with coordinates in range [0, 1]. height: Maximum value for height of absolute box coordinates. width: Maximum value for width of absolute box coordinates. check_range: If True, checks if the coordinates are normalized or not. maximum_normalized_coordinate: Maximum coordinate value to be considered as normalized, default to 1.1. scope: name scope. Returns: boxlist with absolute coordinates in terms of the image size. """ with tf.name_scope(scope, 'ToAbsoluteCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) # Ensure range of input boxes is correct. if check_range: box_maximum = tf.reduce_max(boxlist.get()) max_assert = tf.Assert( tf.greater_equal(maximum_normalized_coordinate, box_maximum), ['maximum box coordinate value is larger ' 'than %f: ' % maximum_normalized_coordinate, box_maximum]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(boxlist, height, width) def refine_boxes_multi_class(pool_boxes, num_classes, nms_iou_thresh, nms_max_detections, voting_iou_thresh=0.5): """Refines a pool of boxes using non max suppression and box voting. Box refinement is done independently for each class. Args: pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must have a rank 1 'scores' field and a rank 1 'classes' field. num_classes: (int scalar) Number of classes. nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). nms_max_detections: (int scalar) maximum output size for NMS. voting_iou_thresh: (float scalar) iou threshold for box voting. Returns: BoxList of refined boxes. Raises: ValueError: if a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. b) pool_boxes is not a BoxList. c) pool_boxes does not have a scores and classes field. """ if not 0.0 <= nms_iou_thresh <= 1.0: raise ValueError('nms_iou_thresh must be between 0 and 1') if not 0.0 <= voting_iou_thresh <= 1.0: raise ValueError('voting_iou_thresh must be between 0 and 1') if not isinstance(pool_boxes, box_list.BoxList): raise ValueError('pool_boxes must be a BoxList') if not pool_boxes.has_field('scores'): raise ValueError('pool_boxes must have a \'scores\' field') if not pool_boxes.has_field('classes'): raise ValueError('pool_boxes must have a \'classes\' field') refined_boxes = [] for i in range(num_classes): boxes_class = filter_field_value_equals(pool_boxes, 'classes', i) refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh, nms_max_detections, voting_iou_thresh) refined_boxes.append(refined_boxes_class) return sort_by_field(concatenate(refined_boxes), 'scores') def refine_boxes(pool_boxes, nms_iou_thresh, nms_max_detections, voting_iou_thresh=0.5): """Refines a pool of boxes using non max suppression and box voting. 
Args: pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must have a rank 1 'scores' field. nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS). nms_max_detections: (int scalar) maximum output size for NMS. voting_iou_thresh: (float scalar) iou threshold for box voting. Returns: BoxList of refined boxes. Raises: ValueError: if a) nms_iou_thresh or voting_iou_thresh is not in [0, 1]. b) pool_boxes is not a BoxList. c) pool_boxes does not have a scores field. """ if not 0.0 <= nms_iou_thresh <= 1.0: raise ValueError('nms_iou_thresh must be between 0 and 1') if not 0.0 <= voting_iou_thresh <= 1.0: raise ValueError('voting_iou_thresh must be between 0 and 1') if not isinstance(pool_boxes, box_list.BoxList): raise ValueError('pool_boxes must be a BoxList') if not pool_boxes.has_field('scores'): raise ValueError('pool_boxes must have a \'scores\' field') nms_boxes = non_max_suppression( pool_boxes, nms_iou_thresh, nms_max_detections) return box_voting(nms_boxes, pool_boxes, voting_iou_thresh) def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5): """Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015. Performs box voting as described in 'Object detection via a multi-region & semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes with iou overlap >= iou_thresh. The location of B is set to the weighted average location of boxes in S (scores are used for weighting). And the score of B is set to the average score of boxes in S. Args: selected_boxes: BoxList containing a subset of boxes in pool_boxes. These boxes are usually selected from pool_boxes using non max suppression. pool_boxes: BoxList containing a set of (possibly redundant) boxes. iou_thresh: (float scalar) iou threshold for matching boxes in selected_boxes and pool_boxes. Returns: BoxList containing averaged locations and scores for each box in selected_boxes. Raises: ValueError: if a) selected_boxes or pool_boxes is not a BoxList. b) if iou_thresh is not in [0, 1]. c) pool_boxes does not have a scores field. """ if not 0.0 <= iou_thresh <= 1.0: raise ValueError('iou_thresh must be between 0 and 1') if not isinstance(selected_boxes, box_list.BoxList): raise ValueError('selected_boxes must be a BoxList') if not isinstance(pool_boxes, box_list.BoxList): raise ValueError('pool_boxes must be a BoxList') if not pool_boxes.has_field('scores'): raise ValueError('pool_boxes must have a \'scores\' field') iou_ = iou(selected_boxes, pool_boxes) match_indicator = tf.to_float(tf.greater(iou_, iou_thresh)) num_matches = tf.reduce_sum(match_indicator, 1) # TODO(kbanoop): Handle the case where some boxes in selected_boxes do not # match to any boxes in pool_boxes. For such boxes without any matches, we # should return the original boxes without voting. 
match_assert = tf.Assert( tf.reduce_all(tf.greater(num_matches, 0)), ['Each box in selected_boxes must match with at least one box ' 'in pool_boxes.']) scores = tf.expand_dims(pool_boxes.get_field('scores'), 1) scores_assert = tf.Assert( tf.reduce_all(tf.greater_equal(scores, 0)), ['Scores must be non negative.']) with tf.control_dependencies([scores_assert, match_assert]): sum_scores = tf.matmul(match_indicator, scores) averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches box_locations = tf.matmul(match_indicator, pool_boxes.get() * scores) / sum_scores averaged_boxes = box_list.BoxList(box_locations) _copy_extra_fields(averaged_boxes, selected_boxes) averaged_boxes.add_field('scores', averaged_scores) return averaged_boxes def pad_or_clip_box_list(boxlist, num_boxes, scope=None): """Pads or clips all fields of a BoxList. Args: boxlist: A BoxList with arbitrary of number of boxes. num_boxes: First num_boxes in boxlist are kept. The fields are zero-padded if num_boxes is bigger than the actual number of boxes. scope: name scope. Returns: BoxList with all fields padded or clipped. """ with tf.name_scope(scope, 'PadOrClipBoxList'): subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor( boxlist.get(), num_boxes)) for field in boxlist.get_extra_fields(): subfield = shape_utils.pad_or_clip_tensor( boxlist.get_field(field), num_boxes) subboxlist.add_field(field, subfield) return subboxlist def select_random_box(boxlist, default_box=None, seed=None, scope=None): """Selects a random bounding box from a `BoxList`. Args: boxlist: A BoxList. default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`, this default box will be returned. If None, will use a default box of [[-1., -1., -1., -1.]]. seed: Random seed. scope: Name scope. Returns: bbox: A [1, 4] tensor with a random bounding box. valid: A bool tensor indicating whether a valid bounding box is returned (True) or whether the default box is returned (False). """ with tf.name_scope(scope, 'SelectRandomBox'): bboxes = boxlist.get() combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes) number_of_boxes = combined_shape[0] default_box = default_box or tf.constant([[-1., -1., -1., -1.]]) def select_box(): random_index = tf.random_uniform([], maxval=number_of_boxes, dtype=tf.int32, seed=seed) return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True) return tf.cond( tf.greater_equal(number_of_boxes, 1), true_fn=select_box, false_fn=lambda: (default_box, tf.constant(False))) def get_minimal_coverage_box(boxlist, default_box=None, scope=None): """Creates a single bounding box which covers all boxes in the boxlist. Args: boxlist: A Boxlist. default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`, this default box will be returned. If None, will use a default box of [[0., 0., 1., 1.]]. scope: Name scope. Returns: A [1, 4] float32 tensor with a bounding box that tightly covers all the boxes in the box list. If the boxlist does not contain any boxes, the default box is returned. 
""" with tf.name_scope(scope, 'CreateCoverageBox'): num_boxes = boxlist.num_boxes() def coverage_box(bboxes): y_min, x_min, y_max, x_max = tf.split( value=bboxes, num_or_size_splits=4, axis=1) y_min_coverage = tf.reduce_min(y_min, axis=0) x_min_coverage = tf.reduce_min(x_min, axis=0) y_max_coverage = tf.reduce_max(y_max, axis=0) x_max_coverage = tf.reduce_max(x_max, axis=0) return tf.stack( [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage], axis=1) default_box = default_box or tf.constant([[0., 0., 1., 1.]]) return tf.cond( tf.greater_equal(num_boxes, 1), true_fn=lambda: coverage_box(boxlist.get()), false_fn=lambda: default_box) def sample_boxes_by_jittering(boxlist, num_boxes_to_sample, stddev=0.1, scope=None): """Samples num_boxes_to_sample boxes by jittering around boxlist boxes. It is possible that this function might generate boxes with size 0. The larger the stddev, this is more probable. For a small stddev of 0.1 this probability is very small. Args: boxlist: A boxlist containing N boxes in normalized coordinates. num_boxes_to_sample: A positive integer containing the number of boxes to sample. stddev: Standard deviation. This is used to draw random offsets for the box corners from a normal distribution. The offset is multiplied by the box size so will be larger in terms of pixels for larger boxes. scope: Name scope. Returns: sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in normalized coordinates. """ with tf.name_scope(scope, 'SampleBoxesByJittering'): num_boxes = boxlist.num_boxes() box_indices = tf.random_uniform( [num_boxes_to_sample], minval=0, maxval=num_boxes, dtype=tf.int32) sampled_boxes = tf.gather(boxlist.get(), box_indices) sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0] sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1] rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev) miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0] minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1] maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2] maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3] maxy = tf.maximum(miny, maxy) maxx = tf.maximum(minx, maxx) sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1) sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0) return box_list.BoxList(sampled_boxes)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/box_list_ops.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Abstract detection model. This file defines a generic base class for detection models. Programs that are designed to work with arbitrary detection models should only depend on this class. We intend for the functions in this class to follow tensor-in/tensor-out design, thus all functions have tensors or lists/dictionaries holding tensors as inputs and outputs. Abstractly, detection models predict output tensors given input images which can be passed to a loss function at training time or passed to a postprocessing function at eval time. The computation graphs at a high level consequently look as follows: Training time: inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor) Evaluation time: inputs (images tensor) -> preprocess -> predict -> postprocess -> outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor) DetectionModels must thus implement four functions: (1) preprocess, (2) predict, (3) postprocess and (4) loss. DetectionModels should make no assumptions about the input size or aspect ratio --- they are responsible for doing any resize/reshaping necessary (see docstring for the preprocess function). Output classes are always integers in the range [0, num_classes). Any mapping of these integers to semantic labels is to be handled outside of this class. Images are resized in the `preprocess` method. All of `preprocess`, `predict`, and `postprocess` should be reentrant. The `preprocess` method runs `image_resizer_fn` that returns resized_images and `true_image_shapes`. Since `image_resizer_fn` can pad the images with zeros, true_image_shapes indicate the slices that contain the image without padding. This is useful for padding images to be a fixed size for batching. The `postprocess` method uses the true image shapes to clip predictions that lie outside of images. By default, DetectionModels produce bounding box detections; however, we support a handful of auxiliary annotations associated with each bounding box, namely, instance masks and keypoints. """ from abc import ABCMeta from abc import abstractmethod from object_detection.core import standard_fields as fields class DetectionModel(object): """Abstract base class for detection models.""" __metaclass__ = ABCMeta def __init__(self, num_classes): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include background categories that might be implicitly predicted in various implementations. """ self._num_classes = num_classes self._groundtruth_lists = {} @property def num_classes(self): return self._num_classes def groundtruth_lists(self, field): """Access list of groundtruth tensors. Args: field: a string key, options are fields.BoxListFields.{boxes,classes,masks,keypoints} or fields.InputDataFields.is_annotated. 
Returns: a list of tensors holding groundtruth information (see also provide_groundtruth function below), with one entry for each image in the batch. Raises: RuntimeError: if the field has not been provided via provide_groundtruth. """ if field not in self._groundtruth_lists: raise RuntimeError('Groundtruth tensor {} has not been provided'.format( field)) return self._groundtruth_lists[field] def groundtruth_has_field(self, field): """Determines whether the groundtruth includes the given field. Args: field: a string key, options are fields.BoxListFields.{boxes,classes,masks,keypoints} or fields.InputDataFields.is_annotated. Returns: True if the groundtruth includes the given field, False otherwise. """ return field in self._groundtruth_lists @abstractmethod def preprocess(self, inputs): """Input preprocessing. To be overridden by implementations. This function is responsible for any scaling/shifting of input values that is necessary prior to running the detector on an input image. It is also responsible for any resizing and padding that might be necessary, as images are assumed to arrive in arbitrary sizes. While this function could conceivably be part of the predict method (below), it is often convenient to keep these separate --- for example, we may want to preprocess on one device, place onto a queue, and let another device (e.g., the GPU) handle prediction. A few important notes about the preprocess function: + We assume that this operation does not have any trainable variables nor does it affect the groundtruth annotations in any way (thus data augmentation operations such as random cropping should be performed externally). + There is no assumption that the batch size in this function is the same as the batch size in the predict function. In fact, we recommend calling the preprocess function prior to calling any batching operations (which should happen outside of the model) and thus assuming that batch sizes are equal to 1 in the preprocess function. + There is also no explicit assumption that the output resolutions must be fixed across inputs --- this is to support "fully convolutional" settings in which input images can have different shapes/resolutions. Args: inputs: a [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float32 tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ pass @abstractmethod def predict(self, preprocessed_inputs, true_image_shapes): """Predict prediction tensors from inputs tensor. Outputs of this function can be passed to loss or postprocess functions. Args: preprocessed_inputs: a [batch, height, width, channels] float32 tensor representing a batch of images. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding prediction tensors to be passed to the Loss or Postprocess functions. """ pass @abstractmethod def postprocess(self, prediction_dict, true_image_shapes, **params): """Convert predicted output tensors to final detections. 
Outputs adhere to the following conventions: * Classes are integers in [0, num_classes); background classes are removed and the first non-background class is mapped to 0. If the model produces class-agnostic detections, then no output is produced for classes. * Boxes are to be interpreted as being in [y_min, x_min, y_max, x_max] format and normalized relative to the image window. * `num_detections` is provided for settings where detections are padded to a fixed number of boxes. * We do not specifically assume any kind of probabilistic interpretation of the scores --- the only important thing is their relative ordering. Thus implementations of the postprocess function are free to output logits, probabilities, calibrated probabilities, or anything else. Args: prediction_dict: a dictionary holding prediction tensors. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **params: Additional keyword arguments for specific implementations of DetectionModel. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detections, 4] detection_scores: [batch, max_detections] detection_classes: [batch, max_detections] (If a model is producing class-agnostic detections, this field may be missing) instance_masks: [batch, max_detections, image_height, image_width] (optional) keypoints: [batch, max_detections, num_keypoints, 2] (optional) num_detections: [batch] """ pass @abstractmethod def loss(self, prediction_dict, true_image_shapes): """Compute scalar loss tensors with respect to provided groundtruth. Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function. Args: prediction_dict: a dictionary holding predicted tensors true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: a dictionary mapping strings (loss names) to scalar tensors representing loss values. """ pass def provide_groundtruth(self, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list=None, groundtruth_keypoints_list=None, groundtruth_weights_list=None, groundtruth_confidences_list=None, groundtruth_is_crowd_list=None, is_annotated_list=None): """Provide groundtruth tensors. Args: groundtruth_boxes_list: a list of 2-D tf.float32 tensors of shape [num_boxes, 4] containing coordinates of the groundtruth boxes. Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and assumed to be normalized and clipped relative to the image window with y_min <= y_max and x_min <= x_max. groundtruth_classes_list: a list of 2-D tf.float32 one-hot (or k-hot) tensors of shape [num_boxes, num_classes] containing the class targets with the 0th index assumed to map to the first non-background class. groundtruth_masks_list: a list of 3-D tf.float32 tensors of shape [num_boxes, height_in, width_in] containing instance masks with values in {0, 1}. If None, no masks are provided. Mask resolution `height_in`x`width_in` must agree with the resolution of the input image tensor provided to the `preprocess` function. groundtruth_keypoints_list: a list of 3-D tf.float32 tensors of shape [num_boxes, num_keypoints, 2] containing keypoints. 
Keypoints are assumed to be provided in normalized coordinates and missing keypoints should be encoded as NaN. groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape [num_boxes] containing weights for groundtruth boxes. groundtruth_confidences_list: A list of 2-D tf.float32 tensors of shape [num_boxes, num_classes] containing class confidences for groundtruth boxes. groundtruth_is_crowd_list: A list of 1-D tf.bool tensors of shape [num_boxes] containing is_crowd annotations is_annotated_list: A list of scalar tf.bool tensors indicating whether images have been labeled or not. """ self._groundtruth_lists[fields.BoxListFields.boxes] = groundtruth_boxes_list self._groundtruth_lists[ fields.BoxListFields.classes] = groundtruth_classes_list if groundtruth_weights_list: self._groundtruth_lists[fields.BoxListFields. weights] = groundtruth_weights_list if groundtruth_confidences_list: self._groundtruth_lists[fields.BoxListFields. confidences] = groundtruth_confidences_list if groundtruth_masks_list: self._groundtruth_lists[ fields.BoxListFields.masks] = groundtruth_masks_list if groundtruth_keypoints_list: self._groundtruth_lists[ fields.BoxListFields.keypoints] = groundtruth_keypoints_list if groundtruth_is_crowd_list: self._groundtruth_lists[ fields.BoxListFields.is_crowd] = groundtruth_is_crowd_list if is_annotated_list: self._groundtruth_lists[ fields.InputDataFields.is_annotated] = is_annotated_list @abstractmethod def regularization_losses(self): """Returns a list of regularization losses for this model. Returns a list of regularization losses for this model that the estimator needs to use during training/optimization. Returns: A list of regularization loss tensors. """ pass @abstractmethod def restore_map(self, fine_tune_checkpoint_type='detection'): """Returns a map of variables to load from a foreign checkpoint. Returns a map of variable names to load from a checkpoint to variables in the model graph. This enables the model to initialize based on weights from another task. For example, the feature extractor variables from a classification model can be used to bootstrap training of an object detector. When loading from an object detection model, the checkpoint model should have the same parameters as this detection model with exception of the num_classes parameter. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. Returns: A dict mapping variable names (to load from a checkpoint) to variables in the model graph. """ pass @abstractmethod def updates(self): """Returns a list of update operators for this model. Returns a list of update operators for this model that must be executed at each training step. The estimator's train op needs to have a control dependency on these updates. Returns: A list of update operators. """ pass
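As an editorial illustration of the contract above (not part of the original file), the following hypothetical minimal subclass satisfies all seven abstract methods; the one-box-per-image behaviour and the name `_ToyDetectionModel` are assumptions made purely for demonstration.

import tensorflow as tf

class _ToyDetectionModel(DetectionModel):
  """Hypothetical single-class model emitting one fixed box per image."""

  def preprocess(self, inputs):
    # Scale pixels from [0, 255] to [-1, 1]; no resizing or padding is done,
    # so the true image shapes are simply the input shapes.
    preprocessed_inputs = (2.0 / 255.0) * inputs - 1.0
    batch = tf.shape(inputs)[0]
    true_image_shapes = tf.tile(tf.shape(inputs)[1:4][tf.newaxis, :],
                                tf.stack([batch, 1]))
    return preprocessed_inputs, true_image_shapes

  def predict(self, preprocessed_inputs, true_image_shapes):
    # A placeholder "backbone": one scalar per image.
    return {'image_mean': tf.reduce_mean(preprocessed_inputs, axis=[1, 2, 3])}

  def postprocess(self, prediction_dict, true_image_shapes, **params):
    batch = tf.shape(prediction_dict['image_mean'])[0]
    return {
        'detection_boxes': tf.tile(tf.constant([[[0., 0., 1., 1.]]]),
                                   tf.stack([batch, 1, 1])),
        'detection_scores':
            tf.sigmoid(prediction_dict['image_mean'])[:, tf.newaxis],
        'detection_classes': tf.zeros([batch, 1]),
        'num_detections': tf.ones([batch]),
    }

  def loss(self, prediction_dict, true_image_shapes):
    return {'dummy_loss': tf.reduce_sum(prediction_dict['image_mean'])}

  def regularization_losses(self):
    return []

  def restore_map(self, fine_tune_checkpoint_type='detection'):
    return {}

  def updates(self):
    return []

Usage would be, e.g., `model = _ToyDetectionModel(num_classes=1)`, after which the training-time chain preprocess -> predict -> loss and the eval-time chain preprocess -> predict -> postprocess both match the tensor shapes documented above.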
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/model.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base anchor generator. The job of the anchor generator is to create (or load) a collection of bounding boxes to be used as anchors. Generated anchors are assumed to match some convolutional grid or list of grid shapes. For example, we might want to generate anchors matching an 8x8 feature map and a 4x4 feature map. If we place 3 anchors per grid location on the first feature map and 6 anchors per grid location on the second feature map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total. To support fully convolutional settings, feature map shapes are passed dynamically at generation time. The number of anchors to place at each location is static --- implementations of AnchorGenerator must always be able to return the number of anchors that they use per location for each feature map. """ from abc import ABCMeta from abc import abstractmethod import tensorflow as tf class AnchorGenerator(object): """Abstract base class for anchor generators.""" __metaclass__ = ABCMeta @abstractmethod def name_scope(self): """Name scope. Must be defined by implementations. Returns: a string representing the name scope of the anchor generation operation. """ pass @property def check_num_anchors(self): """Whether to dynamically check the number of anchors generated. Can be overridden by implementations that would like to disable this behavior. Returns: a boolean controlling whether the `generate` function should dynamically check the number of anchors generated against the mathematically expected number of anchors. """ return True @abstractmethod def num_anchors_per_location(self): """Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the `generate` function. """ pass def generate(self, feature_map_shape_list, **params): """Generates a collection of bounding boxes to be used as anchors. TODO(rathodv): remove **params from argument list and make stride and offsets (for multiple_grid_anchor_generator) constructor arguments. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. Pairs can be provided as 1-dimensional integer tensors of length 2 or simply as tuples of integers. **params: parameters for anchor generation op Returns: boxes_list: a list of BoxLists each holding anchor boxes corresponding to the input feature map shapes. Raises: ValueError: if the number of feature map shapes does not match the length of `num_anchors_per_location`. 
""" if self.check_num_anchors and ( len(feature_map_shape_list) != len(self.num_anchors_per_location())): raise ValueError('Number of feature maps is expected to equal the length ' 'of `num_anchors_per_location`.') with tf.name_scope(self.name_scope()): anchors_list = self._generate(feature_map_shape_list, **params) if self.check_num_anchors: with tf.control_dependencies([ self._assert_correct_number_of_anchors( anchors_list, feature_map_shape_list)]): for item in anchors_list: item.set(tf.identity(item.get())) return anchors_list @abstractmethod def _generate(self, feature_map_shape_list, **params): """To be overridden by implementations. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. **params: parameters for anchor generation op Returns: boxes_list: a list of BoxList, each holding a collection of N anchor boxes. """ pass def _assert_correct_number_of_anchors(self, anchors_list, feature_map_shape_list): """Assert that correct number of anchors was generated. Args: anchors_list: A list of box_list.BoxList object holding anchors generated. feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. Returns: Op that raises InvalidArgumentError if the number of anchors does not match the number of expected anchors. """ expected_num_anchors = 0 actual_num_anchors = 0 for num_anchors_per_location, feature_map_shape, anchors in zip( self.num_anchors_per_location(), feature_map_shape_list, anchors_list): expected_num_anchors += (num_anchors_per_location * feature_map_shape[0] * feature_map_shape[1]) actual_num_anchors += anchors.num_boxes() return tf.assert_equal(expected_num_anchors, actual_num_anchors)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/anchor_generator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.minibatch_sampler.""" import numpy as np import tensorflow as tf from object_detection.core import minibatch_sampler class MinibatchSamplerTest(tf.test.TestCase): def test_subsample_indicator_when_more_true_elements_than_num_samples(self): np_indicator = [True, False, True, False, True, True, False] indicator = tf.constant(np_indicator) samples = minibatch_sampler.MinibatchSampler.subsample_indicator( indicator, 3) with self.test_session() as sess: samples_out = sess.run(samples) self.assertEqual(3, np.sum(samples_out)) self.assertAllEqual(samples_out, np.logical_and(samples_out, np_indicator)) def test_subsample_when_more_true_elements_than_num_samples_no_shape(self): np_indicator = [True, False, True, False, True, True, False] indicator = tf.placeholder(tf.bool) feed_dict = {indicator: np_indicator} samples = minibatch_sampler.MinibatchSampler.subsample_indicator( indicator, 3) with self.test_session() as sess: samples_out = sess.run(samples, feed_dict=feed_dict) self.assertEqual(3, np.sum(samples_out)) self.assertAllEqual(samples_out, np.logical_and(samples_out, np_indicator)) def test_subsample_indicator_when_less_true_elements_than_num_samples(self): np_indicator = [True, False, True, False, True, True, False] indicator = tf.constant(np_indicator) samples = minibatch_sampler.MinibatchSampler.subsample_indicator( indicator, 5) with self.test_session() as sess: samples_out = sess.run(samples) self.assertEqual(4, np.sum(samples_out)) self.assertAllEqual(samples_out, np.logical_and(samples_out, np_indicator)) def test_subsample_indicator_when_num_samples_is_zero(self): np_indicator = [True, False, True, False, True, True, False] indicator = tf.constant(np_indicator) samples_none = minibatch_sampler.MinibatchSampler.subsample_indicator( indicator, 0) with self.test_session() as sess: samples_none_out = sess.run(samples_none) self.assertAllEqual( np.zeros_like(samples_none_out, dtype=bool), samples_none_out) def test_subsample_indicator_when_indicator_all_false(self): indicator_empty = tf.zeros([0], dtype=tf.bool) samples_empty = minibatch_sampler.MinibatchSampler.subsample_indicator( indicator_empty, 4) with self.test_session() as sess: samples_empty_out = sess.run(samples_empty) self.assertEqual(0, samples_empty_out.size) if __name__ == '__main__': tf.test.main()
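The behaviour these tests pin down can be summarized by a small NumPy sketch (an editorial addition, not part of the original file; `subsample_indicator_numpy` is a hypothetical stand-in for the in-graph implementation): at most `num_samples` of the True entries survive, chosen at random, and no entry that was False is ever turned on.

def subsample_indicator_numpy(indicator, num_samples, rng=np.random):
  """Hypothetical NumPy stand-in for MinibatchSampler.subsample_indicator."""
  indicator = np.asarray(indicator, dtype=bool)
  true_indices = np.flatnonzero(indicator)
  # Randomly keep at most num_samples of the True positions.
  keep = rng.permutation(true_indices)[:num_samples]
  sampled = np.zeros_like(indicator)
  sampled[keep] = True
  return sampled

# With the indicator used above, [True, False, True, False, True, True, False],
# asking for 3 samples yields exactly 3 True entries, and asking for 5 yields
# only 4, since only 4 candidates exist -- matching the assertions in the tests.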
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/minibatch_sampler_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.preprocessor.""" import numpy as np import six import tensorflow as tf from object_detection.core import preprocessor from object_detection.core import preprocessor_cache from object_detection.core import standard_fields as fields if six.PY2: import mock # pylint: disable=g-import-not-at-top else: from unittest import mock # pylint: disable=g-import-not-at-top class PreprocessorTest(tf.test.TestCase): def createColorfulTestImage(self): ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8)) ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8)) ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8)) imr = tf.concat([ch255, ch0, ch0], 3) img = tf.concat([ch255, ch255, ch0], 3) imb = tf.concat([ch255, ch0, ch255], 3) imw = tf.concat([ch128, ch128, ch128], 3) imu = tf.concat([imr, img], 2) imd = tf.concat([imb, imw], 2) im = tf.concat([imu, imd], 1) return im def createTestImages(self): images_r = tf.constant([[[128, 128, 128, 128], [0, 0, 128, 128], [0, 128, 128, 128], [192, 192, 128, 128]]], dtype=tf.uint8) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0, 0, 128, 128], [0, 0, 128, 128], [0, 128, 192, 192], [192, 192, 128, 192]]], dtype=tf.uint8) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[128, 128, 192, 0], [0, 0, 128, 192], [0, 128, 128, 0], [192, 192, 192, 128]]], dtype=tf.uint8) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def createEmptyTestBoxes(self): boxes = tf.constant([[]], dtype=tf.float32) return boxes def createTestBoxes(self): boxes = tf.constant( [[0.0, 0.25, 0.75, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) return boxes def createTestGroundtruthWeights(self): return tf.constant([1.0, 0.5], dtype=tf.float32) def createTestMasks(self): mask = np.array([ [[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]], [[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]]) return tf.constant(mask, dtype=tf.float32) def createTestKeypoints(self): keypoints = np.array([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], ]) return tf.constant(keypoints, dtype=tf.float32) def createTestKeypointsInsideCrop(self): keypoints = np.array([ [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]], ]) return tf.constant(keypoints, dtype=tf.float32) def createTestKeypointsOutsideCrop(self): keypoints = np.array([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], ]) return tf.constant(keypoints, dtype=tf.float32) def createKeypointFlipPermutation(self): return np.array([0, 2, 1], dtype=np.int32) def createTestLabels(self): labels = tf.constant([1, 2], dtype=tf.int32) return labels def createTestBoxesOutOfImage(self): boxes = tf.constant( [[-0.1, 0.25, 0.75, 1], [0.25, 0.5, 
0.75, 1.1]], dtype=tf.float32) return boxes def createTestMultiClassScores(self): return tf.constant([[1.0, 0.0], [0.5, 0.5]], dtype=tf.float32) def expectedImagesAfterNormalization(self): images_r = tf.constant([[[0, 0, 0, 0], [-1, -1, 0, 0], [-1, 0, 0, 0], [0.5, 0.5, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-1, -1, 0, 0], [-1, -1, 0, 0], [-1, 0, 0.5, 0.5], [0.5, 0.5, 0, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0, 0, 0.5, -1], [-1, -1, 0, 0.5], [-1, 0, 0, -1], [0.5, 0.5, 0.5, 0]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedMaxImageAfterColorScale(self): images_r = tf.constant([[[0.1, 0.1, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, 0.1, 0.1], [0.6, 0.6, 0.1, 0.1]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-0.9, -0.9, 0.1, 0.1], [-0.9, -0.9, 0.1, 0.1], [-0.9, 0.1, 0.6, 0.6], [0.6, 0.6, 0.1, 0.6]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0.1, 0.1, 0.6, -0.9], [-0.9, -0.9, 0.1, 0.6], [-0.9, 0.1, 0.1, -0.9], [0.6, 0.6, 0.6, 0.1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedMinImageAfterColorScale(self): images_r = tf.constant([[[-0.1, -0.1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, -0.1, -0.1], [0.4, 0.4, -0.1, -0.1]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[-1, -1, -0.1, -0.1], [-1, -1, -0.1, -0.1], [-1, -0.1, 0.4, 0.4], [0.4, 0.4, -0.1, 0.4]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[-0.1, -0.1, 0.4, -1], [-1, -1, -0.1, 0.4], [-1, -0.1, -0.1, -1], [0.4, 0.4, 0.4, -0.1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedImagesAfterLeftRightFlip(self): images_r = tf.constant([[[0, 0, 0, 0], [0, 0, -1, -1], [0, 0, 0, -1], [0, 0, 0.5, 0.5]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0, 0, -1, -1], [0, 0, -1, -1], [0.5, 0.5, 0, -1], [0.5, 0, 0.5, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[-1, 0.5, 0, 0], [0.5, 0, -1, -1], [-1, 0, 0, -1], [0, 0.5, 0.5, 0.5]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedImagesAfterUpDownFlip(self): images_r = tf.constant([[[0.5, 0.5, 0, 0], [-1, 0, 0, 0], [-1, -1, 0, 0], [0, 0, 0, 0]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0.5, 0.5, 0, 0.5], [-1, 0, 0.5, 0.5], [-1, -1, 0, 0], [-1, -1, 0, 0]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[0.5, 0.5, 0.5, 0], [-1, 0, 0, -1], [-1, -1, 0, 0.5], [0, 0, 0.5, -1]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedImagesAfterRot90(self): images_r = tf.constant([[[0, 0, 0, 0], [0, 0, 0, 0], [0, -1, 0, 0.5], [0, -1, -1, 0.5]]], dtype=tf.float32) images_r = tf.expand_dims(images_r, 3) images_g = tf.constant([[[0, 0, 0.5, 0.5], [0, 0, 0.5, 0], [-1, -1, 0, 0.5], [-1, -1, -1, 0.5]]], dtype=tf.float32) images_g = tf.expand_dims(images_g, 3) images_b = tf.constant([[[-1, 0.5, -1, 0], [0.5, 0, 0, 0.5], [0, -1, 0, 0.5], [0, -1, -1, 
0.5]]], dtype=tf.float32) images_b = tf.expand_dims(images_b, 3) images = tf.concat([images_r, images_g, images_b], 3) return images def expectedBoxesAfterLeftRightFlip(self): boxes = tf.constant([[0.0, 0.0, 0.75, 0.75], [0.25, 0.0, 0.75, 0.5]], dtype=tf.float32) return boxes def expectedBoxesAfterUpDownFlip(self): boxes = tf.constant([[0.25, 0.25, 1.0, 1.0], [0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) return boxes def expectedBoxesAfterRot90(self): boxes = tf.constant( [[0.0, 0.0, 0.75, 0.75], [0.0, 0.25, 0.5, 0.75]], dtype=tf.float32) return boxes def expectedMasksAfterLeftRightFlip(self): mask = np.array([ [[0.0, 0.0, 255.0], [0.0, 0.0, 255.0], [0.0, 0.0, 255.0]], [[0.0, 255.0, 255.0], [0.0, 255.0, 255.0], [0.0, 255.0, 255.0]]]) return tf.constant(mask, dtype=tf.float32) def expectedMasksAfterUpDownFlip(self): mask = np.array([ [[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]], [[255.0, 255.0, 0.0], [255.0, 255.0, 0.0], [255.0, 255.0, 0.0]]]) return tf.constant(mask, dtype=tf.float32) def expectedMasksAfterRot90(self): mask = np.array([ [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [255.0, 255.0, 255.0]], [[0.0, 0.0, 0.0], [255.0, 255.0, 255.0], [255.0, 255.0, 255.0]]]) return tf.constant(mask, dtype=tf.float32) def expectedLabelScoresAfterThresholding(self): return tf.constant([1.0], dtype=tf.float32) def expectedBoxesAfterThresholding(self): return tf.constant([[0.0, 0.25, 0.75, 1.0]], dtype=tf.float32) def expectedLabelsAfterThresholding(self): return tf.constant([1], dtype=tf.float32) def expectedMultiClassScoresAfterThresholding(self): return tf.constant([[1.0, 0.0]], dtype=tf.float32) def expectedMasksAfterThresholding(self): mask = np.array([ [[255.0, 0.0, 0.0], [255.0, 0.0, 0.0], [255.0, 0.0, 0.0]]]) return tf.constant(mask, dtype=tf.float32) def expectedKeypointsAfterThresholding(self): keypoints = np.array([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]] ]) return tf.constant(keypoints, dtype=tf.float32) def expectedLabelScoresAfterThresholdingWithMissingScore(self): return tf.constant([np.nan], dtype=tf.float32) def expectedBoxesAfterThresholdingWithMissingScore(self): return tf.constant([[0.25, 0.5, 0.75, 1]], dtype=tf.float32) def expectedLabelsAfterThresholdingWithMissingScore(self): return tf.constant([2], dtype=tf.float32) def testRgbToGrayscale(self): images = self.createTestImages() grayscale_images = preprocessor._rgb_to_grayscale(images) expected_images = tf.image.rgb_to_grayscale(images) with self.test_session() as sess: (grayscale_images, expected_images) = sess.run( [grayscale_images, expected_images]) self.assertAllEqual(expected_images, grayscale_images) def testNormalizeImage(self): preprocess_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 256, 'target_minval': -1, 'target_maxval': 1 })] images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] images_expected = self.expectedImagesAfterNormalization() with self.test_session() as sess: (images_, images_expected_) = sess.run( [images, images_expected]) images_shape_ = images_.shape images_expected_shape_ = images_expected_.shape expected_shape = [1, 4, 4, 3] self.assertAllEqual(images_expected_shape_, images_shape_) self.assertAllEqual(images_shape_, expected_shape) self.assertAllClose(images_, images_expected_) def testRetainBoxesAboveThreshold(self): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = 
self.createTestGroundtruthWeights() (retained_boxes, retained_labels, retained_weights) = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, threshold=0.6) with self.test_session() as sess: (retained_boxes_, retained_labels_, retained_weights_, expected_retained_boxes_, expected_retained_labels_, expected_retained_weights_) = sess.run([ retained_boxes, retained_labels, retained_weights, self.expectedBoxesAfterThresholding(), self.expectedLabelsAfterThresholding(), self.expectedLabelScoresAfterThresholding()]) self.assertAllClose( retained_boxes_, expected_retained_boxes_) self.assertAllClose( retained_labels_, expected_retained_labels_) self.assertAllClose( retained_weights_, expected_retained_weights_) def testRetainBoxesAboveThresholdWithMultiClassScores(self): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() multiclass_scores = self.createTestMultiClassScores() (_, _, _, retained_multiclass_scores) = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, multiclass_scores=multiclass_scores, threshold=0.6) with self.test_session() as sess: (retained_multiclass_scores_, expected_retained_multiclass_scores_) = sess.run([ retained_multiclass_scores, self.expectedMultiClassScoresAfterThresholding() ]) self.assertAllClose(retained_multiclass_scores_, expected_retained_multiclass_scores_) def testRetainBoxesAboveThresholdWithMasks(self): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() masks = self.createTestMasks() _, _, _, retained_masks = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, masks, threshold=0.6) with self.test_session() as sess: retained_masks_, expected_retained_masks_ = sess.run([ retained_masks, self.expectedMasksAfterThresholding()]) self.assertAllClose( retained_masks_, expected_retained_masks_) def testRetainBoxesAboveThresholdWithKeypoints(self): boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() keypoints = self.createTestKeypoints() (_, _, _, retained_keypoints) = preprocessor.retain_boxes_above_threshold( boxes, labels, weights, keypoints=keypoints, threshold=0.6) with self.test_session() as sess: (retained_keypoints_, expected_retained_keypoints_) = sess.run([ retained_keypoints, self.expectedKeypointsAfterThresholding()]) self.assertAllClose( retained_keypoints_, expected_retained_keypoints_) def testFlipBoxesLeftRight(self): boxes = self.createTestBoxes() flipped_boxes = preprocessor._flip_boxes_left_right(boxes) expected_boxes = self.expectedBoxesAfterLeftRightFlip() with self.test_session() as sess: flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes]) self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) def testFlipBoxesUpDown(self): boxes = self.createTestBoxes() flipped_boxes = preprocessor._flip_boxes_up_down(boxes) expected_boxes = self.expectedBoxesAfterUpDownFlip() with self.test_session() as sess: flipped_boxes, expected_boxes = sess.run([flipped_boxes, expected_boxes]) self.assertAllEqual(flipped_boxes.flatten(), expected_boxes.flatten()) def testRot90Boxes(self): boxes = self.createTestBoxes() rotated_boxes = preprocessor._rot90_boxes(boxes) expected_boxes = self.expectedBoxesAfterRot90() with self.test_session() as sess: rotated_boxes, expected_boxes = sess.run([rotated_boxes, expected_boxes]) self.assertAllEqual(rotated_boxes.flatten(), expected_boxes.flatten()) def 
testFlipMasksLeftRight(self): test_mask = self.createTestMasks() flipped_mask = preprocessor._flip_masks_left_right(test_mask) expected_mask = self.expectedMasksAfterLeftRightFlip() with self.test_session() as sess: flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask]) self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) def testFlipMasksUpDown(self): test_mask = self.createTestMasks() flipped_mask = preprocessor._flip_masks_up_down(test_mask) expected_mask = self.expectedMasksAfterUpDownFlip() with self.test_session() as sess: flipped_mask, expected_mask = sess.run([flipped_mask, expected_mask]) self.assertAllEqual(flipped_mask.flatten(), expected_mask.flatten()) def testRot90Masks(self): test_mask = self.createTestMasks() rotated_mask = preprocessor._rot90_masks(test_mask) expected_mask = self.expectedMasksAfterRot90() with self.test_session() as sess: rotated_mask, expected_mask = sess.run([rotated_mask, expected_mask]) self.assertAllEqual(rotated_mask.flatten(), expected_mask.flatten()) def _testPreprocessorCache(self, preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False, num_runs=4): cache = preprocessor_cache.PreprocessorCache() images = self.createTestImages() boxes = self.createTestBoxes() weights = self.createTestGroundtruthWeights() classes = self.createTestLabels() masks = self.createTestMasks() keypoints = self.createTestKeypoints() preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=test_masks, include_keypoints=test_keypoints) out = [] for i in range(num_runs): tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_weights: weights } num_outputs = 1 if test_boxes: tensor_dict[fields.InputDataFields.groundtruth_boxes] = boxes tensor_dict[fields.InputDataFields.groundtruth_classes] = classes num_outputs += 1 if test_masks: tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks num_outputs += 1 if test_keypoints: tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints num_outputs += 1 out.append(preprocessor.preprocess( tensor_dict, preprocess_options, preprocessor_arg_map, cache)) with self.test_session() as sess: to_run = [] for i in range(num_runs): to_run.append(out[i][fields.InputDataFields.image]) if test_boxes: to_run.append(out[i][fields.InputDataFields.groundtruth_boxes]) if test_masks: to_run.append( out[i][fields.InputDataFields.groundtruth_instance_masks]) if test_keypoints: to_run.append(out[i][fields.InputDataFields.groundtruth_keypoints]) out_array = sess.run(to_run) for i in range(num_outputs, len(out_array)): self.assertAllClose(out_array[i], out_array[i - num_outputs]) def testRandomHorizontalFlip(self): preprocess_options = [(preprocessor.random_horizontal_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterLeftRightFlip() boxes_expected1 = self.expectedBoxesAfterLeftRightFlip() images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) 
images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = sess.run([images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomHorizontalFlipWithEmptyBoxes(self): preprocess_options = [(preprocessor.random_horizontal_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createEmptyTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterLeftRightFlip() boxes_expected = self.createEmptyTestBoxes() images_expected2 = images tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_, boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes, boxes_expected]) self.assertAllClose(boxes_, boxes_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomHorizontalFlipWithCache(self): keypoint_flip_permutation = self.createKeypointFlipPermutation() preprocess_options = [ (preprocessor.random_horizontal_flip, {'keypoint_flip_permutation': keypoint_flip_permutation})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRunRandomHorizontalFlipWithMaskAndKeypoints(self): preprocess_options = [(preprocessor.random_horizontal_flip, {})] image_height = 3 image_width = 3 images = tf.random_uniform([1, image_height, image_width, 3]) boxes = self.createTestBoxes() masks = self.createTestMasks() keypoints = self.createTestKeypoints() keypoint_flip_permutation = self.createKeypointFlipPermutation() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_instance_masks: masks, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocess_options = [ (preprocessor.random_horizontal_flip, {'keypoint_flip_permutation': keypoint_flip_permutation})] preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_keypoints=True) tensor_dict = preprocessor.preprocess( tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] with self.test_session() as sess: boxes, masks, keypoints = sess.run([boxes, masks, keypoints]) self.assertTrue(boxes is not None) self.assertTrue(masks is not None) self.assertTrue(keypoints is not None) def testRandomVerticalFlip(self): preprocess_options = [(preprocessor.random_vertical_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.image: 
images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterUpDownFlip() boxes_expected1 = self.expectedBoxesAfterUpDownFlip() images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = sess.run([images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomVerticalFlipWithEmptyBoxes(self): preprocess_options = [(preprocessor.random_vertical_flip, {})] images = self.expectedImagesAfterNormalization() boxes = self.createEmptyTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterUpDownFlip() boxes_expected = self.createEmptyTestBoxes() images_expected2 = images tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_, boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes, boxes_expected]) self.assertAllClose(boxes_, boxes_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomVerticalFlipWithCache(self): keypoint_flip_permutation = self.createKeypointFlipPermutation() preprocess_options = [ (preprocessor.random_vertical_flip, {'keypoint_flip_permutation': keypoint_flip_permutation})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRunRandomVerticalFlipWithMaskAndKeypoints(self): preprocess_options = [(preprocessor.random_vertical_flip, {})] image_height = 3 image_width = 3 images = tf.random_uniform([1, image_height, image_width, 3]) boxes = self.createTestBoxes() masks = self.createTestMasks() keypoints = self.createTestKeypoints() keypoint_flip_permutation = self.createKeypointFlipPermutation() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_instance_masks: masks, fields.InputDataFields.groundtruth_keypoints: keypoints } preprocess_options = [ (preprocessor.random_vertical_flip, {'keypoint_flip_permutation': keypoint_flip_permutation})] preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_keypoints=True) tensor_dict = preprocessor.preprocess( tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) boxes = 
tensor_dict[fields.InputDataFields.groundtruth_boxes] masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] with self.test_session() as sess: boxes, masks, keypoints = sess.run([boxes, masks, keypoints]) self.assertTrue(boxes is not None) self.assertTrue(masks is not None) self.assertTrue(keypoints is not None) def testRandomRotation90(self): preprocess_options = [(preprocessor.random_rotation90, {})] images = self.expectedImagesAfterNormalization() boxes = self.createTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterRot90() boxes_expected1 = self.expectedBoxesAfterRot90() images_expected2 = images boxes_expected2 = boxes tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] boxes_diff1 = tf.squared_difference(boxes, boxes_expected1) boxes_diff2 = tf.squared_difference(boxes, boxes_expected2) boxes_diff = tf.multiply(boxes_diff1, boxes_diff2) boxes_diff_expected = tf.zeros_like(boxes_diff) images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_diff_, boxes_diff_expected_) = sess.run([images_diff, images_diff_expected, boxes_diff, boxes_diff_expected]) self.assertAllClose(boxes_diff_, boxes_diff_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomRotation90WithEmptyBoxes(self): preprocess_options = [(preprocessor.random_rotation90, {})] images = self.expectedImagesAfterNormalization() boxes = self.createEmptyTestBoxes() tensor_dict = {fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes} images_expected1 = self.expectedImagesAfterRot90() boxes_expected = self.createEmptyTestBoxes() images_expected2 = images tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images = tensor_dict[fields.InputDataFields.image] boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] images_diff1 = tf.squared_difference(images, images_expected1) images_diff2 = tf.squared_difference(images, images_expected2) images_diff = tf.multiply(images_diff1, images_diff2) images_diff_expected = tf.zeros_like(images_diff) with self.test_session() as sess: (images_diff_, images_diff_expected_, boxes_, boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes, boxes_expected]) self.assertAllClose(boxes_, boxes_expected_) self.assertAllClose(images_diff_, images_diff_expected_) def testRandomRotation90WithCache(self): preprocess_options = [(preprocessor.random_rotation90, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRunRandomRotation90WithMaskAndKeypoints(self): preprocess_options = [(preprocessor.random_rotation90, {})] image_height = 3 image_width = 3 images = tf.random_uniform([1, image_height, image_width, 3]) boxes = self.createTestBoxes() masks = self.createTestMasks() keypoints = self.createTestKeypoints() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_instance_masks: masks, 
fields.InputDataFields.groundtruth_keypoints: keypoints } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_instance_masks=True, include_keypoints=True) tensor_dict = preprocessor.preprocess( tensor_dict, preprocess_options, func_arg_map=preprocessor_arg_map) boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes] masks = tensor_dict[fields.InputDataFields.groundtruth_instance_masks] keypoints = tensor_dict[fields.InputDataFields.groundtruth_keypoints] with self.test_session() as sess: boxes, masks, keypoints = sess.run([boxes, masks, keypoints]) self.assertTrue(boxes is not None) self.assertTrue(masks is not None) self.assertTrue(keypoints is not None) def testRandomPixelValueScale(self): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_pixel_value_scale, {})) images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_min = tf.to_float(images) * 0.9 / 255.0 images_max = tf.to_float(images) * 1.1 / 255.0 images = tensor_dict[fields.InputDataFields.image] values_greater = tf.greater_equal(images, images_min) values_less = tf.less_equal(images, images_max) values_true = tf.fill([1, 4, 4, 3], True) with self.test_session() as sess: (values_greater_, values_less_, values_true_) = sess.run( [values_greater, values_less, values_true]) self.assertAllClose(values_greater_, values_true_) self.assertAllClose(values_less_, values_true_) def testRandomPixelValueScaleWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_pixel_value_scale, {})) self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=False, test_keypoints=False) def testRandomImageScale(self): preprocess_options = [(preprocessor.random_image_scale, {})] images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images_scaled = tensor_dict[fields.InputDataFields.image] images_original_shape = tf.shape(images_original) images_scaled_shape = tf.shape(images_scaled) with self.test_session() as sess: (images_original_shape_, images_scaled_shape_) = sess.run( [images_original_shape, images_scaled_shape]) self.assertTrue( images_original_shape_[1] * 0.5 <= images_scaled_shape_[1]) self.assertTrue( images_original_shape_[1] * 2.0 >= images_scaled_shape_[1]) self.assertTrue( images_original_shape_[2] * 0.5 <= images_scaled_shape_[2]) self.assertTrue( images_original_shape_[2] * 2.0 >= images_scaled_shape_[2]) def testRandomImageScaleWithCache(self): preprocess_options = [(preprocessor.random_image_scale, {})] self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomRGBtoGray(self): preprocess_options = [(preprocessor.random_rgb_to_gray, {})] images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options) images_gray = tensor_dict[fields.InputDataFields.image] images_gray_r, images_gray_g, images_gray_b = tf.split( value=images_gray, num_or_size_splits=3, 
axis=3) images_r, images_g, images_b = tf.split( value=images_original, num_or_size_splits=3, axis=3) images_r_diff1 = tf.squared_difference(tf.to_float(images_r), tf.to_float(images_gray_r)) images_r_diff2 = tf.squared_difference(tf.to_float(images_gray_r), tf.to_float(images_gray_g)) images_r_diff = tf.multiply(images_r_diff1, images_r_diff2) images_g_diff1 = tf.squared_difference(tf.to_float(images_g), tf.to_float(images_gray_g)) images_g_diff2 = tf.squared_difference(tf.to_float(images_gray_g), tf.to_float(images_gray_b)) images_g_diff = tf.multiply(images_g_diff1, images_g_diff2) images_b_diff1 = tf.squared_difference(tf.to_float(images_b), tf.to_float(images_gray_b)) images_b_diff2 = tf.squared_difference(tf.to_float(images_gray_b), tf.to_float(images_gray_r)) images_b_diff = tf.multiply(images_b_diff1, images_b_diff2) image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1]) with self.test_session() as sess: (images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = sess.run( [images_r_diff, images_g_diff, images_b_diff, image_zero1]) self.assertAllClose(images_r_diff_, image_zero1_) self.assertAllClose(images_g_diff_, image_zero1_) self.assertAllClose(images_b_diff_, image_zero1_) def testRandomRGBtoGrayWithCache(self): preprocess_options = [( preprocessor.random_rgb_to_gray, {'probability': 0.5})] self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomAdjustBrightness(self): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_adjust_brightness, {})) images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_bright = tensor_dict[fields.InputDataFields.image] image_original_shape = tf.shape(images_original) image_bright_shape = tf.shape(images_bright) with self.test_session() as sess: (image_original_shape_, image_bright_shape_) = sess.run( [image_original_shape, image_bright_shape]) self.assertAllEqual(image_original_shape_, image_bright_shape_) def testRandomAdjustBrightnessWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_adjust_brightness, {})) self._testPreprocessorCache(preprocess_options, test_boxes=False, test_masks=False, test_keypoints=False) def testRandomAdjustContrast(self): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_adjust_contrast, {})) images_original = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images_original} tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images_contrast = tensor_dict[fields.InputDataFields.image] image_original_shape = tf.shape(images_original) image_contrast_shape = tf.shape(images_contrast) with self.test_session() as sess: (image_original_shape_, image_contrast_shape_) = sess.run( [image_original_shape, image_contrast_shape]) self.assertAllEqual(image_original_shape_, image_contrast_shape_) def testRandomAdjustContrastWithCache(self): preprocess_options 

  def testRandomAdjustContrastWithCache(self):
    preprocess_options = []
    preprocess_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocess_options.append((preprocessor.random_adjust_contrast, {}))
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=False,
                                test_masks=False,
                                test_keypoints=False)

  def testRandomAdjustHue(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_adjust_hue, {}))
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_hue = tensor_dict[fields.InputDataFields.image]
    image_original_shape = tf.shape(images_original)
    image_hue_shape = tf.shape(images_hue)
    with self.test_session() as sess:
      (image_original_shape_, image_hue_shape_) = sess.run(
          [image_original_shape, image_hue_shape])
      self.assertAllEqual(image_original_shape_, image_hue_shape_)

  def testRandomAdjustHueWithCache(self):
    preprocess_options = []
    preprocess_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocess_options.append((preprocessor.random_adjust_hue, {}))
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=False,
                                test_masks=False,
                                test_keypoints=False)

  def testRandomDistortColor(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_distort_color, {}))
    images_original = self.createTestImages()
    images_original_shape = tf.shape(images_original)
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_distorted_color = tensor_dict[fields.InputDataFields.image]
    images_distorted_color_shape = tf.shape(images_distorted_color)
    with self.test_session() as sess:
      (images_original_shape_, images_distorted_color_shape_) = sess.run(
          [images_original_shape, images_distorted_color_shape])
      self.assertAllEqual(images_original_shape_,
                          images_distorted_color_shape_)

  def testRandomDistortColorWithCache(self):
    preprocess_options = []
    preprocess_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocess_options.append((preprocessor.random_distort_color, {}))
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=False,
                                test_masks=False,
                                test_keypoints=False)

  def testRandomJitterBoxes(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.random_jitter_boxes, {}))
    boxes = self.createTestBoxes()
    boxes_shape = tf.shape(boxes)
    tensor_dict = {fields.InputDataFields.groundtruth_boxes: boxes}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    distorted_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    distorted_boxes_shape = tf.shape(distorted_boxes)
    with self.test_session() as sess:
      (boxes_shape_, distorted_boxes_shape_) = sess.run(
          [boxes_shape, distorted_boxes_shape])
      self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
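
  # The crop tests below mostly assert rank/shape invariants, since the crop
  # window is random; the deterministic-value tests further down pin the
  # window by mocking tf.image.sample_distorted_bounding_box instead.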

  def testRandomCropImage(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_crop_image, {}))
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                    preprocessing_options)
    distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_rank = tf.rank(boxes)
    distorted_boxes_rank = tf.rank(distorted_boxes)
    images_rank = tf.rank(images)
    distorted_images_rank = tf.rank(distorted_images)
    self.assertEqual(3, distorted_images.get_shape()[3])
    with self.test_session() as sess:
      (boxes_rank_, distorted_boxes_rank_, images_rank_,
       distorted_images_rank_) = sess.run([
           boxes_rank, distorted_boxes_rank, images_rank,
           distorted_images_rank
       ])
      self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
      self.assertAllEqual(images_rank_, distorted_images_rank_)

  def testRandomCropImageWithCache(self):
    preprocess_options = [(preprocessor.random_rgb_to_gray,
                           {'probability': 0.5}),
                          (preprocessor.normalize_image, {
                              'original_minval': 0,
                              'original_maxval': 255,
                              'target_minval': 0,
                              'target_maxval': 1,
                          }),
                          (preprocessor.random_crop_image, {})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=False,
                                test_keypoints=False)

  def testRandomCropImageGrayscale(self):
    preprocessing_options = [(preprocessor.rgb_to_gray, {}),
                             (preprocessor.normalize_image, {
                                 'original_minval': 0,
                                 'original_maxval': 255,
                                 'target_minval': 0,
                                 'target_maxval': 1,
                             }),
                             (preprocessor.random_crop_image, {})]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    distorted_tensor_dict = preprocessor.preprocess(
        tensor_dict, preprocessing_options)
    distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_rank = tf.rank(boxes)
    distorted_boxes_rank = tf.rank(distorted_boxes)
    images_rank = tf.rank(images)
    distorted_images_rank = tf.rank(distorted_images)
    self.assertEqual(1, distorted_images.get_shape()[3])
    with self.test_session() as sess:
      session_results = sess.run([
          boxes_rank, distorted_boxes_rank, images_rank,
          distorted_images_rank
      ])
      (boxes_rank_, distorted_boxes_rank_, images_rank_,
       distorted_images_rank_) = session_results
      self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
      self.assertAllEqual(images_rank_, distorted_images_rank_)

  def testRandomCropImageWithBoxOutOfImage(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_crop_image, {}))
    images = self.createTestImages()
    boxes = self.createTestBoxesOutOfImage()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                    preprocessing_options)
    distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_rank = tf.rank(boxes)
    distorted_boxes_rank = tf.rank(distorted_boxes)
    images_rank = tf.rank(images)
    distorted_images_rank = tf.rank(distorted_images)
    with self.test_session() as sess:
      (boxes_rank_, distorted_boxes_rank_, images_rank_,
       distorted_images_rank_) = sess.run(
           [boxes_rank, distorted_boxes_rank, images_rank,
            distorted_images_rank])
      self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
      self.assertAllEqual(images_rank_, distorted_images_rank_)

  def testRandomCropImageWithRandomCoefOne(self):
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_crop_image, {
        'random_coef': 1.0
    })]
    distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                    preprocessing_options)
    distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_labels = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_classes]
    distorted_weights = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_weights]
    boxes_shape = tf.shape(boxes)
    distorted_boxes_shape = tf.shape(distorted_boxes)
    images_shape = tf.shape(images)
    distorted_images_shape = tf.shape(distorted_images)
    with self.test_session() as sess:
      (boxes_shape_, distorted_boxes_shape_, images_shape_,
       distorted_images_shape_, images_, distorted_images_,
       boxes_, distorted_boxes_, labels_, distorted_labels_,
       weights_, distorted_weights_) = sess.run(
           [boxes_shape, distorted_boxes_shape, images_shape,
            distorted_images_shape, images, distorted_images,
            boxes, distorted_boxes, labels, distorted_labels,
            weights, distorted_weights])
      self.assertAllEqual(boxes_shape_, distorted_boxes_shape_)
      self.assertAllEqual(images_shape_, distorted_images_shape_)
      self.assertAllClose(images_, distorted_images_)
      self.assertAllClose(boxes_, distorted_boxes_)
      self.assertAllEqual(labels_, distorted_labels_)
      self.assertAllEqual(weights_, distorted_weights_)
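
  # random_coef=1.0 is used above to make random_crop_image keep the input
  # unchanged (the coefficient is, as exercised here, the chance of skipping
  # the crop), so every output tensor can be asserted equal to its input.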

  def testRandomCropWithMockSampleDistortedBoundingBox(self):
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]
    images = self.createColorfulTestImage()
    boxes = tf.constant([[0.1, 0.1, 0.8, 0.3],
                         [0.2, 0.4, 0.75, 0.75],
                         [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32)
    labels = tf.constant([1, 7, 11], dtype=tf.int32)
    weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box') as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                      preprocessing_options)
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_weights = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_weights]
      expected_boxes = tf.constant(
          [[0.178947, 0.07173, 0.75789469, 0.66244733],
           [0.28421, 0.0, 0.38947365, 0.57805908]], dtype=tf.float32)
      expected_labels = tf.constant([7, 11], dtype=tf.int32)
      expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32)
      with self.test_session() as sess:
        (distorted_boxes_, distorted_labels_, distorted_weights_,
         expected_boxes_, expected_labels_, expected_weights_) = sess.run(
             [distorted_boxes, distorted_labels, distorted_weights,
              expected_boxes, expected_labels, expected_weights])
        self.assertAllClose(distorted_boxes_, expected_boxes_)
        self.assertAllEqual(distorted_labels_, expected_labels_)
        self.assertAllEqual(distorted_weights_, expected_weights_)
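
  # The mocked return value above pins the crop to a 190x237 window offset by
  # (6, 143); the first groundtruth box lies entirely outside that window,
  # which is why only labels 7 and 11 (and their weights) survive.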

  def testRandomCropWithoutClipBoxes(self):
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]
    images = self.createColorfulTestImage()
    boxes = tf.constant([[0.1, 0.1, 0.8, 0.3],
                         [0.2, 0.4, 0.75, 0.75],
                         [0.3, 0.1, 0.4, 0.7]], dtype=tf.float32)
    keypoints = tf.constant([
        [[0.1, 0.1], [0.8, 0.3]],
        [[0.2, 0.4], [0.75, 0.75]],
        [[0.3, 0.1], [0.4, 0.7]],
    ], dtype=tf.float32)
    labels = tf.constant([1, 7, 11], dtype=tf.int32)
    weights = tf.constant([1.0, 0.5, 0.6], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_keypoints: keypoints,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    preprocessing_options = [(preprocessor.random_crop_image, {
        'clip_boxes': False,
    })]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box') as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      preprocessor_arg_map = preprocessor.get_default_func_arg_map(
          include_keypoints=True)
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options,
          func_arg_map=preprocessor_arg_map)
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_weights = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_weights]
      expected_boxes = tf.constant(
          [[0.178947, 0.07173, 0.75789469, 0.66244733],
           [0.28421, -0.434599, 0.38947365, 0.57805908]], dtype=tf.float32)
      expected_keypoints = tf.constant(
          [[[0.178947, 0.07173], [0.75789469, 0.66244733]],
           [[0.28421, -0.434599], [0.38947365, 0.57805908]]],
          dtype=tf.float32)
      expected_labels = tf.constant([7, 11], dtype=tf.int32)
      expected_weights = tf.constant([0.5, 0.6], dtype=tf.float32)
      with self.test_session() as sess:
        (distorted_boxes_, distorted_keypoints_, distorted_labels_,
         distorted_weights_, expected_boxes_, expected_keypoints_,
         expected_labels_, expected_weights_) = sess.run(
             [distorted_boxes, distorted_keypoints, distorted_labels,
              distorted_weights, expected_boxes, expected_keypoints,
              expected_labels, expected_weights])
        self.assertAllClose(distorted_boxes_, expected_boxes_)
        self.assertAllClose(distorted_keypoints_, expected_keypoints_)
        self.assertAllEqual(distorted_labels_, expected_labels_)
        self.assertAllEqual(distorted_weights_, expected_weights_)

  def testRandomCropImageWithMultiClassScores(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_crop_image, {}))
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    multiclass_scores = self.createTestMultiClassScores()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
        fields.InputDataFields.multiclass_scores: multiclass_scores
    }
    distorted_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                    preprocessing_options)
    distorted_images = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_multiclass_scores = distorted_tensor_dict[
        fields.InputDataFields.multiclass_scores]
    boxes_rank = tf.rank(boxes)
    distorted_boxes_rank = tf.rank(distorted_boxes)
    images_rank = tf.rank(images)
    distorted_images_rank = tf.rank(distorted_images)
    multiclass_scores_rank = tf.rank(multiclass_scores)
    distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores)
    with self.test_session() as sess:
      (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_,
       distorted_images_rank_, multiclass_scores_rank_,
       distorted_multiclass_scores_rank_,
       distorted_multiclass_scores_) = sess.run([
           boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank,
           distorted_images_rank, multiclass_scores_rank,
           distorted_multiclass_scores_rank, distorted_multiclass_scores
       ])
      self.assertAllEqual(boxes_rank_, distorted_boxes_rank_)
      self.assertAllEqual(images_rank_, distorted_images_rank_)
      self.assertAllEqual(multiclass_scores_rank_,
                          distorted_multiclass_scores_rank_)
      self.assertAllEqual(distorted_boxes_.shape[0],
                          distorted_multiclass_scores_.shape[0])
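
  # The tests below call the private helper _strict_random_crop_image
  # directly, again with sample_distorted_bounding_box mocked, so exact
  # cropped image/box/mask/keypoint values can be checked.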

  def testStrictRandomCropImageWithGroundtruthWeights(self):
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_groundtruth_weights = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, weights))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_groundtruth_weights = (
            sess.run(
                [new_image, new_boxes, new_labels, new_groundtruth_weights])
        )
        expected_boxes = np.array(
            [[0.0, 0.0, 0.75789469, 1.0],
             [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllEqual(new_groundtruth_weights, [1.0, 0.5])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())

  def testStrictRandomCropImageWithMasks(self):
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_weights, new_masks = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, weights, masks=masks))
      with self.test_session() as sess:
        new_image, new_boxes, new_labels, new_weights, new_masks = sess.run(
            [new_image, new_boxes, new_labels, new_weights, new_masks])
        expected_boxes = np.array(
            [[0.0, 0.0, 0.75789469, 1.0],
             [0.23157893, 0.24050637, 0.75789469, 1.0]], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllEqual(new_masks.shape, [2, 190, 237])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())

  def testStrictRandomCropImageWithKeypoints(self):
    image = self.createColorfulTestImage()[0]
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    keypoints = self.createTestKeypoints()
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      new_image, new_boxes, new_labels, new_weights, new_keypoints = (
          preprocessor._strict_random_crop_image(
              image, boxes, labels, weights, keypoints=keypoints))
      with self.test_session() as sess:
        (new_image, new_boxes, new_labels, new_weights,
         new_keypoints) = sess.run(
             [new_image, new_boxes, new_labels, new_weights, new_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        expected_keypoints = np.array([
            [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]],
            [[0.38947368, 0.07173], [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]]
        ], dtype=np.float32)
        self.assertAllEqual(new_image.shape, [190, 237, 3])
        self.assertAllClose(
            new_boxes.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            new_keypoints.flatten(), expected_keypoints.flatten())
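
  # Keypoints that fall outside the crop window come back as np.nan rather
  # than being dropped, keeping the keypoint tensor aligned with its box.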

  def testRunRandomCropImageWithMasks(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
        fields.InputDataFields.groundtruth_instance_masks: masks,
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_instance_masks=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options,
          func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_masks = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_instance_masks]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_masks_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_masks])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_masks_.shape, [2, 190, 237])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())

  def testRunRandomCropImageWithKeypointsInsideCrop(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    keypoints = self.createTestKeypointsInsideCrop()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints,
        fields.InputDataFields.groundtruth_weights: weights
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options,
          func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        expected_keypoints = np.array([
            [[0.38947368, 0.07173], [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]],
            [[0.38947368, 0.07173], [0.49473682, 0.24050637],
             [0.60000002, 0.40928277]]
        ])
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            distorted_keypoints_.flatten(), expected_keypoints.flatten())

  def testRunRandomCropImageWithKeypointsOutsideCrop(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    keypoints = self.createTestKeypointsOutsideCrop()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_image, {})]
    with mock.patch.object(
        tf.image,
        'sample_distorted_bounding_box'
    ) as mock_sample_distorted_bounding_box:
      mock_sample_distorted_bounding_box.return_value = (
          tf.constant([6, 143, 0], dtype=tf.int32),
          tf.constant([190, 237, -1], dtype=tf.int32),
          tf.constant([[[0.03, 0.3575, 0.98, 0.95]]], dtype=tf.float32))
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options,
          func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run(
             [distorted_image, distorted_boxes, distorted_labels,
              distorted_keypoints])
        expected_boxes = np.array([
            [0.0, 0.0, 0.75789469, 1.0],
            [0.23157893, 0.24050637, 0.75789469, 1.0],
        ], dtype=np.float32)
        expected_keypoints = np.array([
            [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]],
            [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]],
        ])
        self.assertAllEqual(distorted_image_.shape, [1, 190, 237, 3])
        self.assertAllEqual(distorted_labels_, [1, 2])
        self.assertAllClose(
            distorted_boxes_.flatten(), expected_boxes.flatten())
        self.assertAllClose(
            distorted_keypoints_.flatten(), expected_keypoints.flatten())

  def testRunRetainBoxesAboveThreshold(self):
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    tensor_dict = {
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    preprocessing_options = [
        (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
    ]
    preprocessor_arg_map = preprocessor.get_default_func_arg_map()
    retained_tensor_dict = preprocessor.preprocess(
        tensor_dict, preprocessing_options,
        func_arg_map=preprocessor_arg_map)
    retained_boxes = retained_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    retained_labels = retained_tensor_dict[
        fields.InputDataFields.groundtruth_classes]
    retained_weights = retained_tensor_dict[
        fields.InputDataFields.groundtruth_weights]
    with self.test_session() as sess:
      (retained_boxes_, retained_labels_, retained_weights_,
       expected_retained_boxes_, expected_retained_labels_,
       expected_retained_weights_) = sess.run(
           [retained_boxes, retained_labels, retained_weights,
            self.expectedBoxesAfterThresholding(),
            self.expectedLabelsAfterThresholding(),
            self.expectedLabelScoresAfterThresholding()])
      self.assertAllClose(retained_boxes_, expected_retained_boxes_)
      self.assertAllClose(retained_labels_, expected_retained_labels_)
      self.assertAllClose(
          retained_weights_, expected_retained_weights_)
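
  # retain_boxes_above_threshold filters groundtruth by weight; the variants
  # below confirm masks and keypoints are dropped in lockstep with their
  # boxes.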

  def testRunRetainBoxesAboveThresholdWithMasks(self):
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    masks = self.createTestMasks()
    tensor_dict = {
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
        fields.InputDataFields.groundtruth_instance_masks: masks
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_label_weights=True,
        include_instance_masks=True)
    preprocessing_options = [
        (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
    ]
    retained_tensor_dict = preprocessor.preprocess(
        tensor_dict, preprocessing_options,
        func_arg_map=preprocessor_arg_map)
    retained_masks = retained_tensor_dict[
        fields.InputDataFields.groundtruth_instance_masks]
    with self.test_session() as sess:
      (retained_masks_, expected_masks_) = sess.run(
          [retained_masks,
           self.expectedMasksAfterThresholding()])
      self.assertAllClose(retained_masks_, expected_masks_)

  def testRunRetainBoxesAboveThresholdWithKeypoints(self):
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    keypoints = self.createTestKeypoints()
    tensor_dict = {
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [
        (preprocessor.retain_boxes_above_threshold, {'threshold': 0.6})
    ]
    retained_tensor_dict = preprocessor.preprocess(
        tensor_dict, preprocessing_options,
        func_arg_map=preprocessor_arg_map)
    retained_keypoints = retained_tensor_dict[
        fields.InputDataFields.groundtruth_keypoints]
    with self.test_session() as sess:
      (retained_keypoints_, expected_keypoints_) = sess.run(
          [retained_keypoints,
           self.expectedKeypointsAfterThresholding()])
      self.assertAllClose(retained_keypoints_, expected_keypoints_)

  def testRandomCropToAspectRatioWithCache(self):
    preprocess_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=False,
                                test_keypoints=False)
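
  # The aspect-ratio crop tests below mock the module-private helper
  # preprocessor._random_integer to return 0, making the crop offset
  # deterministic so exact output boxes can be asserted.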

  def testRunRandomCropToAspectRatioWithMasks(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
        fields.InputDataFields.groundtruth_instance_masks: masks
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_instance_masks=True)
    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
    with mock.patch.object(preprocessor,
                           '_random_integer') as mock_random_integer:
      mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options,
          func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_masks = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_instance_masks]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_masks_) = sess.run([
             distorted_image, distorted_boxes, distorted_labels,
             distorted_masks
         ])
        expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
        self.assertAllEqual(distorted_labels_, [1])
        self.assertAllClose(distorted_boxes_.flatten(),
                            expected_boxes.flatten())
        self.assertAllEqual(distorted_masks_.shape, [1, 200, 200])

  def testRunRandomCropToAspectRatioWithKeypoints(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    keypoints = self.createTestKeypoints()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {})]
    with mock.patch.object(preprocessor,
                           '_random_integer') as mock_random_integer:
      mock_random_integer.return_value = tf.constant(0, dtype=tf.int32)
      distorted_tensor_dict = preprocessor.preprocess(
          tensor_dict, preprocessing_options,
          func_arg_map=preprocessor_arg_map)
      distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
      distorted_boxes = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_boxes]
      distorted_labels = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_classes]
      distorted_keypoints = distorted_tensor_dict[
          fields.InputDataFields.groundtruth_keypoints]
      with self.test_session() as sess:
        (distorted_image_, distorted_boxes_, distorted_labels_,
         distorted_keypoints_) = sess.run([
             distorted_image, distorted_boxes, distorted_labels,
             distorted_keypoints
         ])
        expected_boxes = np.array([0.0, 0.5, 0.75, 1.0], dtype=np.float32)
        expected_keypoints = np.array(
            [[0.1, 0.2], [0.2, 0.4], [0.3, 0.6]], dtype=np.float32)
        self.assertAllEqual(distorted_image_.shape, [1, 200, 200, 3])
        self.assertAllEqual(distorted_labels_, [1])
        self.assertAllClose(distorted_boxes_.flatten(),
                            expected_boxes.flatten())
        self.assertAllClose(distorted_keypoints_.flatten(),
                            expected_keypoints.flatten())

  def testRandomPadToAspectRatioWithCache(self):
    preprocess_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)
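
  # min_padded_size_ratio and max_padded_size_ratio bound how much larger
  # than the input the padded canvas may be; setting both to (4.0, 4.0)
  # below removes the randomness, so an exact 800x800 output is expected.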

  def testRunRandomPadToAspectRatioWithMinMaxPaddedSizeRatios(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map()
    preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio,
                              {'min_padded_size_ratio': (4.0, 4.0),
                               'max_padded_size_ratio': (4.0, 4.0)})]
    distorted_tensor_dict = preprocessor.preprocess(
        tensor_dict, preprocessing_options,
        func_arg_map=preprocessor_arg_map)
    distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_labels = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_classes]
    with self.test_session() as sess:
      distorted_image_, distorted_boxes_, distorted_labels_ = sess.run([
          distorted_image, distorted_boxes, distorted_labels])
      expected_boxes = np.array(
          [[0.0, 0.125, 0.1875, 0.5], [0.0625, 0.25, 0.1875, 0.5]],
          dtype=np.float32)
      self.assertAllEqual(distorted_image_.shape, [1, 800, 800, 3])
      self.assertAllEqual(distorted_labels_, [1, 2])
      self.assertAllClose(distorted_boxes_.flatten(),
                          expected_boxes.flatten())

  def testRunRandomPadToAspectRatioWithMasks(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    masks = tf.random_uniform([2, 200, 400], dtype=tf.float32)
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_instance_masks: masks
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_instance_masks=True)
    preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
    distorted_tensor_dict = preprocessor.preprocess(
        tensor_dict, preprocessing_options,
        func_arg_map=preprocessor_arg_map)
    distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_labels = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_classes]
    distorted_masks = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_instance_masks]
    with self.test_session() as sess:
      (distorted_image_, distorted_boxes_, distorted_labels_,
       distorted_masks_) = sess.run([
           distorted_image, distorted_boxes, distorted_labels,
           distorted_masks
       ])
      expected_boxes = np.array(
          [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]],
          dtype=np.float32)
      self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
      self.assertAllEqual(distorted_labels_, [1, 2])
      self.assertAllClose(distorted_boxes_.flatten(),
                          expected_boxes.flatten())
      self.assertAllEqual(distorted_masks_.shape, [2, 400, 400])

  def testRunRandomPadToAspectRatioWithKeypoints(self):
    image = self.createColorfulTestImage()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    keypoints = self.createTestKeypoints()
    tensor_dict = {
        fields.InputDataFields.image: image,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_keypoints: keypoints
    }
    preprocessor_arg_map = preprocessor.get_default_func_arg_map(
        include_keypoints=True)
    preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {})]
    distorted_tensor_dict = preprocessor.preprocess(
        tensor_dict, preprocessing_options,
        func_arg_map=preprocessor_arg_map)
    distorted_image = distorted_tensor_dict[fields.InputDataFields.image]
    distorted_boxes = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    distorted_labels = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_classes]
    distorted_keypoints = distorted_tensor_dict[
        fields.InputDataFields.groundtruth_keypoints]
    with self.test_session() as sess:
      (distorted_image_, distorted_boxes_, distorted_labels_,
       distorted_keypoints_) = sess.run([
           distorted_image, distorted_boxes, distorted_labels,
           distorted_keypoints
       ])
      expected_boxes = np.array(
          [[0.0, 0.25, 0.375, 1.0], [0.125, 0.5, 0.375, 1.0]],
          dtype=np.float32)
      expected_keypoints = np.array([
          [[0.05, 0.1], [0.1, 0.2], [0.15, 0.3]],
          [[0.2, 0.4], [0.25, 0.5], [0.3, 0.6]],
      ], dtype=np.float32)
      self.assertAllEqual(distorted_image_.shape, [1, 400, 400, 3])
      self.assertAllEqual(distorted_labels_, [1, 2])
      self.assertAllClose(distorted_boxes_.flatten(),
                          expected_boxes.flatten())
      self.assertAllClose(distorted_keypoints_.flatten(),
                          expected_keypoints.flatten())

  def testRandomPadImageWithCache(self):
    preprocess_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1,}),
                          (preprocessor.random_pad_image, {})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)

  def testRandomPadImage(self):
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_pad_image, {})]
    padded_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                 preprocessing_options)
    padded_images = padded_tensor_dict[fields.InputDataFields.image]
    padded_boxes = padded_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_shape = tf.shape(boxes)
    padded_boxes_shape = tf.shape(padded_boxes)
    images_shape = tf.shape(images)
    padded_images_shape = tf.shape(padded_images)
    with self.test_session() as sess:
      (boxes_shape_, padded_boxes_shape_, images_shape_,
       padded_images_shape_, boxes_, padded_boxes_) = sess.run(
           [boxes_shape, padded_boxes_shape, images_shape,
            padded_images_shape, boxes, padded_boxes])
      self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
      self.assertTrue(
          (images_shape_[1] >= padded_images_shape_[1] * 0.5).all())
      self.assertTrue(
          (images_shape_[2] >= padded_images_shape_[2] * 0.5).all())
      self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all())
      self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all())
      self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
          padded_boxes_[:, 2] - padded_boxes_[:, 0])))
      self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
          padded_boxes_[:, 3] - padded_boxes_[:, 1])))

  def testRandomCropPadImageWithCache(self):
    preprocess_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1,}),
                          (preprocessor.random_crop_pad_image, {})]
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)

  def testRandomCropPadImageWithRandomCoefOne(self):
    preprocessing_options = [(preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    })]
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_crop_pad_image, {
        'random_coef': 1.0
    })]
    padded_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                 preprocessing_options)
    padded_images = padded_tensor_dict[fields.InputDataFields.image]
    padded_boxes = padded_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_shape = tf.shape(boxes)
    padded_boxes_shape = tf.shape(padded_boxes)
    images_shape = tf.shape(images)
    padded_images_shape = tf.shape(padded_images)
    with self.test_session() as sess:
      (boxes_shape_, padded_boxes_shape_, images_shape_,
       padded_images_shape_, boxes_, padded_boxes_) = sess.run(
           [boxes_shape, padded_boxes_shape, images_shape,
            padded_images_shape, boxes, padded_boxes])
      self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
      self.assertTrue(
          (images_shape_[1] >= padded_images_shape_[1] * 0.5).all())
      self.assertTrue(
          (images_shape_[2] >= padded_images_shape_[2] * 0.5).all())
      self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all())
      self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all())
      self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= (
          padded_boxes_[:, 2] - padded_boxes_[:, 0])))
      self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= (
          padded_boxes_[:, 3] - padded_boxes_[:, 1])))

  def testRandomCropToAspectRatio(self):
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    weights = self.createTestGroundtruthWeights()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
        fields.InputDataFields.groundtruth_weights: weights,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, [])
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, {
        'aspect_ratio': 2.0
    })]
    cropped_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    cropped_images = cropped_tensor_dict[fields.InputDataFields.image]
    cropped_boxes = cropped_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_shape = tf.shape(boxes)
    cropped_boxes_shape = tf.shape(cropped_boxes)
    images_shape = tf.shape(images)
    cropped_images_shape = tf.shape(cropped_images)
    with self.test_session() as sess:
      (boxes_shape_, cropped_boxes_shape_, images_shape_,
       cropped_images_shape_) = sess.run([
           boxes_shape, cropped_boxes_shape, images_shape,
           cropped_images_shape
       ])
      self.assertAllEqual(boxes_shape_, cropped_boxes_shape_)
      self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2)
      self.assertEqual(images_shape_[2], cropped_images_shape_[2])

  def testRandomPadToAspectRatio(self):
    images = self.createTestImages()
    boxes = self.createTestBoxes()
    labels = self.createTestLabels()
    tensor_dict = {
        fields.InputDataFields.image: images,
        fields.InputDataFields.groundtruth_boxes: boxes,
        fields.InputDataFields.groundtruth_classes: labels,
    }
    tensor_dict = preprocessor.preprocess(tensor_dict, [])
    images = tensor_dict[fields.InputDataFields.image]
    preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, {
        'aspect_ratio': 2.0
    })]
    padded_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                 preprocessing_options)
    padded_images = padded_tensor_dict[fields.InputDataFields.image]
    padded_boxes = padded_tensor_dict[
        fields.InputDataFields.groundtruth_boxes]
    boxes_shape = tf.shape(boxes)
    padded_boxes_shape = tf.shape(padded_boxes)
    images_shape = tf.shape(images)
    padded_images_shape = tf.shape(padded_images)
    with self.test_session() as sess:
      (boxes_shape_, padded_boxes_shape_, images_shape_,
       padded_images_shape_) = sess.run([
           boxes_shape, padded_boxes_shape, images_shape,
           padded_images_shape
       ])
      self.assertAllEqual(boxes_shape_, padded_boxes_shape_)
      self.assertEqual(images_shape_[1], padded_images_shape_[1])
      self.assertEqual(2 * images_shape_[2], padded_images_shape_[2])
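
  # With aspect_ratio=2.0, the crop above halves the image height while the
  # pad doubles its width; the two shape assertions check the same invariant
  # from opposite directions.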

  def testRandomBlackPatchesWithCache(self):
    preprocess_options = []
    preprocess_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocess_options.append((preprocessor.random_black_patches, {
        'size_to_image_ratio': 0.5
    }))
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)

  def testRandomBlackPatches(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_black_patches, {
        'size_to_image_ratio': 0.5
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    blacked_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    blacked_images = blacked_tensor_dict[fields.InputDataFields.image]
    images_shape = tf.shape(images)
    blacked_images_shape = tf.shape(blacked_images)
    with self.test_session() as sess:
      (images_shape_, blacked_images_shape_) = sess.run(
          [images_shape, blacked_images_shape])
      self.assertAllEqual(images_shape_, blacked_images_shape_)

  def testRandomResizeMethodWithCache(self):
    preprocess_options = []
    preprocess_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocess_options.append((preprocessor.random_resize_method, {
        'target_size': (75, 150)
    }))
    self._testPreprocessorCache(preprocess_options,
                                test_boxes=True,
                                test_masks=True,
                                test_keypoints=True)

  def testRandomResizeMethod(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_resize_method, {
        'target_size': (75, 150)
    }))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    resized_tensor_dict = preprocessor.preprocess(tensor_dict,
                                                  preprocessing_options)
    resized_images = resized_tensor_dict[fields.InputDataFields.image]
    resized_images_shape = tf.shape(resized_images)
    expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32)
    with self.test_session() as sess:
      (expected_images_shape_, resized_images_shape_) = sess.run(
          [expected_images_shape, resized_images_shape])
      self.assertAllEqual(expected_images_shape_,
                          resized_images_shape_)
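
  # The resize tests below sweep (input shape, expected shape) pairs through
  # resize_image and resize_to_range, covering static shapes, placeholders
  # fed with dynamic shapes, and zero-instance mask tensors.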

  def testResizeImageWithMasks(self):
    """Tests image resizing, checking output sizes."""
    in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
    in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
    height = 50
    width = 100
    expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
    expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
    for (in_image_shape, expected_image_shape, in_masks_shape,
         expected_mask_shape) in zip(in_image_shape_list,
                                     expected_image_shape_list,
                                     in_masks_shape_list,
                                     expected_masks_shape_list):
      in_image = tf.random_uniform(in_image_shape)
      in_masks = tf.random_uniform(in_masks_shape)
      out_image, out_masks, _ = preprocessor.resize_image(
          in_image, in_masks, new_height=height, new_width=width)
      out_image_shape = tf.shape(out_image)
      out_masks_shape = tf.shape(out_masks)
      with self.test_session() as sess:
        out_image_shape, out_masks_shape = sess.run(
            [out_image_shape, out_masks_shape])
        self.assertAllEqual(out_image_shape, expected_image_shape)
        self.assertAllEqual(out_masks_shape, expected_mask_shape)

  def testResizeImageWithMasksTensorInputHeightAndWidth(self):
    """Tests image resizing, checking output sizes."""
    in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
    in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
    height = tf.constant(50, dtype=tf.int32)
    width = tf.constant(100, dtype=tf.int32)
    expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
    expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]]
    for (in_image_shape, expected_image_shape, in_masks_shape,
         expected_mask_shape) in zip(in_image_shape_list,
                                     expected_image_shape_list,
                                     in_masks_shape_list,
                                     expected_masks_shape_list):
      in_image = tf.random_uniform(in_image_shape)
      in_masks = tf.random_uniform(in_masks_shape)
      out_image, out_masks, _ = preprocessor.resize_image(
          in_image, in_masks, new_height=height, new_width=width)
      out_image_shape = tf.shape(out_image)
      out_masks_shape = tf.shape(out_masks)
      with self.test_session() as sess:
        out_image_shape, out_masks_shape = sess.run(
            [out_image_shape, out_masks_shape])
        self.assertAllEqual(out_image_shape, expected_image_shape)
        self.assertAllEqual(out_masks_shape, expected_mask_shape)

  def testResizeImageWithNoInstanceMask(self):
    """Tests image resizing, checking output sizes."""
    in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
    in_masks_shape_list = [[0, 60, 40], [0, 15, 30]]
    height = 50
    width = 100
    expected_image_shape_list = [[50, 100, 3], [50, 100, 3]]
    expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]]
    for (in_image_shape, expected_image_shape, in_masks_shape,
         expected_mask_shape) in zip(in_image_shape_list,
                                     expected_image_shape_list,
                                     in_masks_shape_list,
                                     expected_masks_shape_list):
      in_image = tf.random_uniform(in_image_shape)
      in_masks = tf.random_uniform(in_masks_shape)
      out_image, out_masks, _ = preprocessor.resize_image(
          in_image, in_masks, new_height=height, new_width=width)
      out_image_shape = tf.shape(out_image)
      out_masks_shape = tf.shape(out_masks)
      with self.test_session() as sess:
        out_image_shape, out_masks_shape = sess.run(
            [out_image_shape, out_masks_shape])
        self.assertAllEqual(out_image_shape, expected_image_shape)
        self.assertAllEqual(out_masks_shape, expected_mask_shape)

  def testResizeToRangePreservesStaticSpatialShape(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
    min_dim = 50
    max_dim = 100
    expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.random_uniform(in_shape)
      out_image, _ = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      self.assertAllEqual(out_image.get_shape().as_list(), expected_shape)

  def testResizeToRangeWithDynamicSpatialShape(self):
    """Tests image resizing, checking output sizes."""
    in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
    min_dim = 50
    max_dim = 100
    expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]]
    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
      out_image, _ = preprocessor.resize_to_range(
          in_image, min_dimension=min_dim, max_dimension=max_dim)
      out_image_shape = tf.shape(out_image)
      with self.test_session() as sess:
        out_image_shape = sess.run(
            out_image_shape,
            feed_dict={in_image: np.random.randn(*in_shape)})
        self.assertAllEqual(out_image_shape, expected_shape)

  def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self):
    in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]]
    min_dim = 50
    max_dim = 100
    expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]]
    for in_shape, expected_shape in zip(in_shape_list, expected_shape_list):
      in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
      out_image, _ = preprocessor.resize_to_range(
          in_image,
          min_dimension=min_dim,
          max_dimension=max_dim,
          pad_to_max_dimension=True)
      self.assertAllEqual(out_image.shape.as_list(), expected_shape)
      out_image_shape = tf.shape(out_image)
      with self.test_session() as sess:
        out_image_shape = sess.run(
            out_image_shape,
            feed_dict={in_image: np.random.randn(*in_shape)})
        self.assertAllEqual(out_image_shape, expected_shape)

  def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self):
    in_image_np = np.array([[[0, 1, 2]]], np.float32)
    ex_image_np = np.array(
        [[[0, 1, 2], [123.68, 116.779, 103.939]],
         [[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]],
        np.float32)
    min_dim = 1
    max_dim = 2
    in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
    out_image, _ = preprocessor.resize_to_range(
        in_image,
        min_dimension=min_dim,
        max_dimension=max_dim,
        pad_to_max_dimension=True,
        per_channel_pad_value=(123.68, 116.779, 103.939))
    with self.test_session() as sess:
      out_image_np = sess.run(out_image, feed_dict={in_image: in_image_np})
      self.assertAllClose(ex_image_np, out_image_np)

  def testResizeToRangeWithMasksPreservesStaticSpatialShape(self):
    """Tests image resizing, checking output sizes."""
    in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
    in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
    min_dim = 50
    max_dim = 100
    expected_image_shape_list = [[75, 50, 3], [50, 100, 3]]
    expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]]
    for (in_image_shape, expected_image_shape, in_masks_shape,
         expected_mask_shape) in zip(in_image_shape_list,
                                     expected_image_shape_list,
                                     in_masks_shape_list,
                                     expected_masks_shape_list):
      in_image = tf.random_uniform(in_image_shape)
      in_masks = tf.random_uniform(in_masks_shape)
      out_image, out_masks, _ = preprocessor.resize_to_range(
          in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim)
      self.assertAllEqual(out_masks.get_shape().as_list(),
                          expected_mask_shape)
      self.assertAllEqual(out_image.get_shape().as_list(),
                          expected_image_shape)

  def testResizeToRangeWithMasksAndPadToMaxDimension(self):
    """Tests image resizing, checking output sizes."""
    in_image_shape_list = [[60, 40, 3], [15, 30, 3]]
    in_masks_shape_list = [[15, 60, 40], [10, 15, 30]]
    min_dim = 50
    max_dim = 100
    expected_image_shape_list = [[100, 100, 3], [100, 100, 3]]
    expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]]
    for (in_image_shape, expected_image_shape, in_masks_shape,
         expected_mask_shape) in zip(
             in_image_shape_list, expected_image_shape_list,
             in_masks_shape_list, expected_masks_shape_list):
      in_image = tf.placeholder(tf.float32, shape=(None, None, 3))
      in_masks = tf.placeholder(tf.float32, shape=(None, None, None))
      out_image, out_masks, _ = preprocessor.resize_to_range(
          in_image,
          in_masks,
          min_dimension=min_dim,
          max_dimension=max_dim,
          pad_to_max_dimension=True)
      out_image_shape = tf.shape(out_image)
      out_masks_shape = tf.shape(out_masks)
      with self.test_session() as sess:
        out_image_shape, out_masks_shape = sess.run(
            [out_image_shape, out_masks_shape],
            feed_dict={
                in_image: np.random.randn(*in_image_shape),
                in_masks: np.random.randn(*in_masks_shape)
            })
        self.assertAllEqual(out_image_shape, expected_image_shape)
        self.assertAllEqual(out_masks_shape, expected_mask_shape)
"""Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.placeholder(tf.float32, shape=(None, None, 3)) in_masks = tf.placeholder(tf.float32, shape=(None, None, None)) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape], feed_dict={ in_image: np.random.randn(*in_image_shape), in_masks: np.random.randn(*in_masks_shape) }) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRangeWithInstanceMasksTensorOfSizeZero(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRange4DImageTensor(self): image = tf.random_uniform([1, 200, 300, 3]) with self.assertRaises(ValueError): preprocessor.resize_to_range(image, 500, 600) def testResizeToRangeSameMinMax(self): """Tests image resizing, checking output sizes.""" in_shape_list = [[312, 312, 3], [299, 299, 3]] min_dim = 320 max_dim = 320 expected_shape_list = [[320, 320, 3], [320, 320, 3]] for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): in_image = tf.random_uniform(in_shape) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) with self.test_session() as sess: out_image_shape = sess.run(out_image_shape) self.assertAllEqual(out_image_shape, expected_shape) def testResizeToMinDimensionTensorShapes(self): in_image_shape_list = [[60, 55, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 55], [10, 15, 30]] min_dim = 50 expected_image_shape_list = [[60, 55, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 60, 55], [10, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.placeholder(tf.float32, shape=(None, None, 3)) in_masks = 
tf.placeholder(tf.float32, shape=(None, None, None)) out_image, out_masks, _ = preprocessor.resize_to_min_dimension( in_image, in_masks, min_dimension=min_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape], feed_dict={ in_image: np.random.randn(*in_image_shape), in_masks: np.random.randn(*in_masks_shape) }) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToMinDimensionWithInstanceMasksTensorOfSizeZero(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] min_dim = 50 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[0, 75, 50], [0, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_min_dimension( in_image, in_masks, min_dimension=min_dim) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToMinDimensionRaisesErrorOn4DImage(self): image = tf.random_uniform([1, 200, 300, 3]) with self.assertRaises(ValueError): preprocessor.resize_to_min_dimension(image, 500) def testScaleBoxesToPixelCoordinates(self): """Tests box scaling, checking scaled values.""" in_shape = [60, 40, 3] in_boxes = [[0.1, 0.2, 0.4, 0.6], [0.5, 0.3, 0.9, 0.7]] expected_boxes = [[6., 8., 24., 24.], [30., 12., 54., 28.]] in_image = tf.random_uniform(in_shape) in_boxes = tf.constant(in_boxes) _, out_boxes = preprocessor.scale_boxes_to_pixel_coordinates( in_image, boxes=in_boxes) with self.test_session() as sess: out_boxes = sess.run(out_boxes) self.assertAllClose(out_boxes, expected_boxes) def testScaleBoxesToPixelCoordinatesWithKeypoints(self): """Tests box and keypoint scaling, checking scaled values.""" in_shape = [60, 40, 3] in_boxes = self.createTestBoxes() in_keypoints = self.createTestKeypoints() expected_boxes = [[0., 10., 45., 40.], [15., 20., 45., 40.]] expected_keypoints = [ [[6., 4.], [12., 8.], [18., 12.]], [[24., 16.], [30., 20.], [36., 24.]], ] in_image = tf.random_uniform(in_shape) _, out_boxes, out_keypoints = preprocessor.scale_boxes_to_pixel_coordinates( in_image, boxes=in_boxes, keypoints=in_keypoints) with self.test_session() as sess: out_boxes_, out_keypoints_ = sess.run([out_boxes, out_keypoints]) self.assertAllClose(out_boxes_, expected_boxes) self.assertAllClose(out_keypoints_, expected_keypoints) def testSubtractChannelMean(self): """Tests whether channel means have been subtracted.""" with self.test_session(): image = tf.zeros((240, 320, 3)) means = [1, 2, 3] actual = preprocessor.subtract_channel_mean(image, means=means) actual = actual.eval() self.assertTrue((actual[:, :, 0] == -1).all()) self.assertTrue((actual[:, :, 1] == -2).all()) self.assertTrue((actual[:, :, 2] == -3).all()) def testOneHotEncoding(self): """Tests one hot encoding of multiclass 
labels.""" with self.test_session(): labels = tf.constant([1, 4, 2], dtype=tf.int32) one_hot = preprocessor.one_hot_encoding(labels, num_classes=5) one_hot = one_hot.eval() self.assertAllEqual([0, 1, 1, 0, 1], one_hot) def testSSDRandomCropWithCache(self): preprocess_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=False, test_keypoints=False) def testSSDRandomCrop(self): preprocessing_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop, {})] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) with self.test_session() as sess: (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = sess.run( [boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank]) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testSSDRandomCropWithMultiClassScores(self): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop, {})] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() multiclass_scores = self.createTestMultiClassScores() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.multiclass_scores: multiclass_scores, fields.InputDataFields.groundtruth_weights: weights, } preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_multiclass_scores=True) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] distorted_multiclass_scores = distorted_tensor_dict[ fields.InputDataFields.multiclass_scores] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) multiclass_scores_rank = tf.rank(multiclass_scores) distorted_multiclass_scores_rank = tf.rank(distorted_multiclass_scores) with self.test_session() as sess: (boxes_rank_, distorted_boxes_, distorted_boxes_rank_, images_rank_, distorted_images_rank_, multiclass_scores_rank_, distorted_multiclass_scores_, distorted_multiclass_scores_rank_) = sess.run([ boxes_rank, distorted_boxes, distorted_boxes_rank, images_rank, 
distorted_images_rank, multiclass_scores_rank, distorted_multiclass_scores, distorted_multiclass_scores_rank ]) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) self.assertAllEqual(multiclass_scores_rank_, distorted_multiclass_scores_rank_) self.assertAllEqual(distorted_boxes_.shape[0], distorted_multiclass_scores_.shape[0]) def testSSDRandomCropPad(self): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() preprocessing_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop_pad, {})] tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights, } distorted_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ fields.InputDataFields.groundtruth_boxes] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) with self.test_session() as sess: (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = sess.run([ boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank ]) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testSSDRandomCropFixedAspectRatioWithCache(self): preprocess_options = [ (preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=False, test_keypoints=False) def _testSSDRandomCropFixedAspectRatio(self, include_multiclass_scores, include_instance_masks, include_keypoints): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() weights = self.createTestGroundtruthWeights() preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 }), (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})] tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, fields.InputDataFields.groundtruth_weights: weights } if include_multiclass_scores: multiclass_scores = self.createTestMultiClassScores() tensor_dict[fields.InputDataFields.multiclass_scores] = ( multiclass_scores) if include_instance_masks: masks = self.createTestMasks() tensor_dict[fields.InputDataFields.groundtruth_instance_masks] = masks if include_keypoints: keypoints = self.createTestKeypoints() tensor_dict[fields.InputDataFields.groundtruth_keypoints] = keypoints preprocessor_arg_map = preprocessor.get_default_func_arg_map( include_multiclass_scores=include_multiclass_scores, include_instance_masks=include_instance_masks, include_keypoints=include_keypoints) distorted_tensor_dict = preprocessor.preprocess( tensor_dict, preprocessing_options, func_arg_map=preprocessor_arg_map) distorted_images = distorted_tensor_dict[fields.InputDataFields.image] distorted_boxes = distorted_tensor_dict[ 
fields.InputDataFields.groundtruth_boxes] images_rank = tf.rank(images) distorted_images_rank = tf.rank(distorted_images) boxes_rank = tf.rank(boxes) distorted_boxes_rank = tf.rank(distorted_boxes) with self.test_session() as sess: (boxes_rank_, distorted_boxes_rank_, images_rank_, distorted_images_rank_) = sess.run( [boxes_rank, distorted_boxes_rank, images_rank, distorted_images_rank]) self.assertAllEqual(boxes_rank_, distorted_boxes_rank_) self.assertAllEqual(images_rank_, distorted_images_rank_) def testSSDRandomCropFixedAspectRatio(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, include_instance_masks=False, include_keypoints=False) def testSSDRandomCropFixedAspectRatioWithMultiClassScores(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=True, include_instance_masks=False, include_keypoints=False) def testSSDRandomCropFixedAspectRatioWithMasksAndKeypoints(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, include_instance_masks=True, include_keypoints=True) def testSSDRandomCropFixedAspectRatioWithLabelScoresMasksAndKeypoints(self): self._testSSDRandomCropFixedAspectRatio(include_multiclass_scores=False, include_instance_masks=True, include_keypoints=True) def testConvertClassLogitsToSoftmax(self): multiclass_scores = tf.constant( [[1.0, 0.0], [0.5, 0.5], [1000, 1]], dtype=tf.float32) temperature = 2.0 converted_multiclass_scores = ( preprocessor.convert_class_logits_to_softmax( multiclass_scores=multiclass_scores, temperature=temperature)) expected_converted_multiclass_scores = [[[0.62245935, 0.37754068], [0.5, 0.5], [1, 0]]] with self.test_session() as sess: (converted_multiclass_scores_) = sess.run([converted_multiclass_scores]) self.assertAllClose(converted_multiclass_scores_, expected_converted_multiclass_scores) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/preprocessor_test.py
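# A minimal pure-Python sketch (not a file from the repo) of the keep-aspect-ratio
# rule that the resize_to_range tests above encode: scale so the smaller side
# reaches min_dimension, unless that would push the larger side past
# max_dimension, in which case scale by max_dimension over the larger side.
# The helper name `expected_resized_shape` is hypothetical, used only here.
def expected_resized_shape(height, width, min_dimension, max_dimension):
  large_scale = float(min_dimension) / min(height, width)
  small_scale = float(max_dimension) / max(height, width)
  scale = min(large_scale, small_scale)  # clamp so neither side exceeds max_dimension
  return int(round(height * scale)), int(round(width * scale))

# Reproduces the expected shapes asserted in the tests above:
assert expected_resized_shape(60, 40, 50, 100) == (75, 50)
assert expected_resized_shape(15, 30, 50, 100) == (50, 100)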
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.keypoint_ops.""" import numpy as np import tensorflow as tf from object_detection.core import keypoint_ops class KeypointOpsTest(tf.test.TestCase): """Tests for common keypoint operations.""" def test_scale(self): keypoints = tf.constant([ [[0.0, 0.0], [100.0, 200.0]], [[50.0, 120.0], [100.0, 140.0]] ]) y_scale = tf.constant(1.0 / 100) x_scale = tf.constant(1.0 / 200) expected_keypoints = tf.constant([ [[0., 0.], [1.0, 1.0]], [[0.5, 0.6], [1.0, 0.7]] ]) output = keypoint_ops.scale(keypoints, y_scale, x_scale) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_clip_to_window(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.25], [0.75, 0.75]] ]) output = keypoint_ops.clip_to_window(keypoints, window) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_prune_outside_window(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]], [[np.nan, np.nan], [np.nan, np.nan]]]) output = keypoint_ops.prune_outside_window(keypoints, window) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_change_coordinate_frame(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([ [[0, 0.5], [1.0, 1.0]], [[0.5, -0.5], [1.5, 1.5]] ]) output = keypoint_ops.change_coordinate_frame(keypoints, window) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_to_normalized_coordinates(self): keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) output = keypoint_ops.to_normalized_coordinates( keypoints, 40, 60) expected_keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_to_normalized_coordinates_already_normalized(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) output = keypoint_ops.to_normalized_coordinates( keypoints, 40, 60) with self.test_session() as sess: with self.assertRaisesOpError('assertion failed'): 
sess.run(output) def test_to_absolute_coordinates(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) output = keypoint_ops.to_absolute_coordinates( keypoints, 40, 60) expected_keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_to_absolute_coordinates_already_absolute(self): keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) output = keypoint_ops.to_absolute_coordinates( keypoints, 40, 60) with self.test_session() as sess: with self.assertRaisesOpError('assertion failed'): sess.run(output) def test_flip_horizontal(self): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] ]) flip_permutation = [0, 2, 1] expected_keypoints = tf.constant([ [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]], [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]], ]) output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_flip_vertical(self): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] ]) flip_permutation = [0, 2, 1] expected_keypoints = tf.constant([ [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]], [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]], ]) output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_rot90(self): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]] ]) expected_keypoints = tf.constant([ [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]], [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]], ]) output = keypoint_ops.rot90(keypoints) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/keypoint_ops_test.py
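# A small NumPy sketch (not part of the repo) of the arithmetic exercised by
# test_flip_horizontal above: keypoint rows are reordered by flip_permutation
# (so left/right keypoints swap) and each x coordinate is reflected about
# flip_point via new_x = 2 * flip_point - x. `flip_horizontal_np` is a
# hypothetical name introduced only for this illustration.
import numpy as np

def flip_horizontal_np(keypoints, flip_point, flip_permutation):
  # keypoints: float array of shape [num_instances, num_keypoints, 2] in (y, x).
  flipped = keypoints[:, flip_permutation, :].copy()
  flipped[:, :, 1] = 2.0 * flip_point - flipped[:, :, 1]
  return flipped

kp = np.array([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]]])
np.testing.assert_allclose(
    flip_horizontal_np(kp, 0.5, [0, 2, 1]),
    [[[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]]])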
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Box predictor for object detectors. Box predictors are classes that take a high level image feature map as input and produce two predictions, (1) a tensor encoding box locations, and (2) a tensor encoding classes for each box. These components are passed directly to loss functions in our detection models. These modules are separated from the main model since the same few box predictor architectures are shared across many models. """ from abc import abstractmethod import tensorflow as tf BOX_ENCODINGS = 'box_encodings' CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background' MASK_PREDICTIONS = 'mask_predictions' class BoxPredictor(object): """BoxPredictor.""" def __init__(self, is_training, num_classes): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). """ self._is_training = is_training self._num_classes = num_classes @property def is_keras_model(self): return False @property def num_classes(self): return self._num_classes def predict(self, image_features, num_predictions_per_location, scope=None, **params): """Computes encoded object locations and corresponding confidences. Takes a list of high level image feature maps as input and produces a list of box encodings and a list of class scores where each element in the output lists correspond to the feature maps in the input list. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. scope: Variable and Op scope name. **params: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. Raises: ValueError: If length of `image_features` is not equal to length of `num_predictions_per_location`. 
""" if len(image_features) != len(num_predictions_per_location): raise ValueError('image_feature and num_predictions_per_location must ' 'be of same length, found: {} vs {}'. format(len(image_features), len(num_predictions_per_location))) if scope is not None: with tf.variable_scope(scope): return self._predict(image_features, num_predictions_per_location, **params) return self._predict(image_features, num_predictions_per_location, **params) # TODO(rathodv): num_predictions_per_location could be moved to constructor. # This is currently only used by ConvolutionalBoxPredictor. @abstractmethod def _predict(self, image_features, num_predictions_per_location, **params): """Implementations must override this method. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. **params: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ pass class KerasBoxPredictor(tf.keras.Model): """Keras-based BoxPredictor.""" def __init__(self, is_training, num_classes, freeze_batchnorm, inplace_batchnorm_update, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. """ super(KerasBoxPredictor, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._freeze_batchnorm = freeze_batchnorm self._inplace_batchnorm_update = inplace_batchnorm_update @property def is_keras_model(self): return True @property def num_classes(self): return self._num_classes def call(self, image_features, **kwargs): """Computes encoded object locations and corresponding confidences. Takes a list of high level image feature maps as input and produces a list of box encodings and a list of class scores where each element in the output lists correspond to the feature maps in the input list. 
Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. **kwargs: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ return self._predict(image_features, **kwargs) @abstractmethod def _predict(self, image_features, **kwargs): """Implementations must override this method. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. **kwargs: Additional keyword arguments for specific implementations of BoxPredictor. Returns: A dictionary containing at least the following tensors. box_encodings: A list of float tensors. Each entry in the list corresponds to a feature map in the input `image_features` list. All tensors in the list have one of the two following shapes: a. [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. b. [batch_size, num_anchors_i, code_size]. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ raise NotImplementedError
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/box_predictor.py
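# A minimal sketch (an assumption, not production code from the repo) of how
# the BoxPredictor contract above is typically satisfied: subclass it,
# override _predict, and return the BOX_ENCODINGS and
# CLASS_PREDICTIONS_WITH_BACKGROUND keys. The 1x1 convolution heads here are
# purely illustrative, not any real prediction head from the library.
import tensorflow as tf
from object_detection.core import box_predictor

class ToyBoxPredictor(box_predictor.BoxPredictor):
  """Illustrative predictor with one conv head per feature map."""

  def _predict(self, image_features, num_predictions_per_location, **params):
    box_encodings = []
    class_predictions = []
    for features, num_per_location in zip(image_features,
                                          num_predictions_per_location):
      batch_size = tf.shape(features)[0]
      # One 4-valued box encoding and num_classes + 1 scores per anchor.
      boxes = tf.layers.conv2d(features, num_per_location * 4, [1, 1])
      scores = tf.layers.conv2d(
          features, num_per_location * (self.num_classes + 1), [1, 1])
      box_encodings.append(tf.reshape(boxes, [batch_size, -1, 1, 4]))
      class_predictions.append(
          tf.reshape(scores, [batch_size, -1, self.num_classes + 1]))
    return {
        box_predictor.BOX_ENCODINGS: box_encodings,
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions,
    }

# Hypothetical usage: one 10x10x64 feature map, 4 anchors per location.
# predictor = ToyBoxPredictor(is_training=True, num_classes=3)
# preds = predictor.predict([tf.random_uniform([2, 10, 10, 64])], [4],
#                           scope='ToyPredictor')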
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class to subsample minibatches by balancing positives and negatives. Subsamples minibatches based on a pre-specified positive fraction in range [0,1]. The class presumes there are many more negatives than positive examples: if the desired batch_size cannot be achieved with the pre-specified positive fraction, it fills the rest with negative examples. If this is not sufficient for obtaining the desired batch_size, it returns fewer examples. The main function to call is Subsample(self, indicator, labels). For convenience one can also call SubsampleWeights(self, weights, labels) which is defined in the minibatch_sampler base class. When is_static is True, it implements a method that guarantees static shapes. It also ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. """ import tensorflow as tf from object_detection.core import minibatch_sampler from object_detection.utils import ops class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler): """Subsamples minibatches to a desired balance of positives and negatives.""" def __init__(self, positive_fraction=0.5, is_static=False): """Constructs a minibatch sampler. Args: positive_fraction: desired fraction of positive examples (scalar in [0,1]) in the batch. is_static: If True, uses an implementation with static shape guarantees. Raises: ValueError: if positive_fraction < 0, or positive_fraction > 1 """ if positive_fraction < 0 or positive_fraction > 1: raise ValueError('positive_fraction should be in range [0,1]. ' 'Received: %s.' % positive_fraction) self._positive_fraction = positive_fraction self._is_static = is_static def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size): """Counts the numbers of positive and negative examples to be sampled. Args: sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains the signed indices of the examples where the sign is based on the label value. The examples that cannot be sampled are set to 0. It samples at most sample_size*positive_fraction positive examples and the remainder from negative examples. sample_size: Size of subsamples. Returns: A tuple containing the number of positive and negative labels in the subsample. 
""" input_length = tf.shape(sorted_indices_tensor)[0] valid_positive_index = tf.greater(sorted_indices_tensor, tf.zeros(input_length, tf.int32)) num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32)) max_num_positive_samples = tf.constant( int(sample_size * self._positive_fraction), tf.int32) num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos) num_negative_samples = tf.constant(sample_size, tf.int32) - num_positive_samples return num_positive_samples, num_negative_samples def _get_values_from_start_and_end(self, input_tensor, num_start_samples, num_end_samples, total_num_samples): """slices num_start_samples and last num_end_samples from input_tensor. Args: input_tensor: An int32 tensor of shape [N] to be sliced. num_start_samples: Number of examples to be sliced from the beginning of the input tensor. num_end_samples: Number of examples to be sliced from the end of the input tensor. total_num_samples: Sum of is num_start_samples and num_end_samples. This should be a scalar. Returns: A tensor containing the first num_start_samples and last num_end_samples from input_tensor. """ input_length = tf.shape(input_tensor)[0] start_positions = tf.less(tf.range(input_length), num_start_samples) end_positions = tf.greater_equal( tf.range(input_length), input_length - num_end_samples) selected_positions = tf.logical_or(start_positions, end_positions) selected_positions = tf.cast(selected_positions, tf.float32) indexed_positions = tf.multiply(tf.cumsum(selected_positions), selected_positions) one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1, total_num_samples, dtype=tf.float32) return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), one_hot_selector, axes=[0, 0]), tf.int32) def _static_subsample(self, indicator, batch_size, labels): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. N should be a complie time constant. batch_size: desired batch size. This scalar cannot be None. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. N should be a complie time constant. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. It ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ # Check if indicator and labels have a static size. if not indicator.shape.is_fully_defined(): raise ValueError('indicator must be static in shape when is_static is' 'True') if not labels.shape.is_fully_defined(): raise ValueError('labels must be static in shape when is_static is' 'True') if not isinstance(batch_size, int): raise ValueError('batch_size has to be an integer when is_static is' 'True.') input_length = tf.shape(indicator)[0] # Set the number of examples set True in indicator to be at least # batch_size. num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32)) additional_false_sample = tf.less_equal( tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)), batch_size - num_true_sampled) indicator = tf.logical_or(indicator, additional_false_sample) # Shuffle indicator and label. Need to store the permutation to restore the # order post sampling. 
permutation = tf.random_shuffle(tf.range(input_length)) indicator = ops.matmul_gather_on_zeroth_axis( tf.cast(indicator, tf.float32), permutation) labels = ops.matmul_gather_on_zeroth_axis( tf.cast(labels, tf.float32), permutation) # index (starting from 1) when indicator is True, 0 when False indicator_idx = tf.where( tf.cast(indicator, tf.bool), tf.range(1, input_length + 1), tf.zeros(input_length, tf.int32)) # Use -1 for negative labels and +1 for positive labels signed_label = tf.where( tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32), tf.scalar_mul(-1, tf.ones(input_length, tf.int32))) # negative of index for negative label, positive index for positive label, # 0 when indicator is False. signed_indicator_idx = tf.multiply(indicator_idx, signed_label) sorted_signed_indicator_idx = tf.nn.top_k( signed_indicator_idx, input_length, sorted=True).values [num_positive_samples, num_negative_samples] = self._get_num_pos_neg_samples( sorted_signed_indicator_idx, batch_size) sampled_idx = self._get_values_from_start_and_end( sorted_signed_indicator_idx, num_positive_samples, num_negative_samples, batch_size) # Shift the indices to start from 0 and remove any samples that are set as # False. sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32) sampled_idx = tf.multiply( tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32), sampled_idx) sampled_idx_indicator = tf.cast(tf.reduce_sum( tf.one_hot(sampled_idx, depth=input_length), axis=0), tf.bool) # project back the order based on stored permutations reprojections = tf.one_hot(permutation, depth=input_length, dtype=tf.float32) return tf.cast(tf.tensordot( tf.cast(sampled_idx_indicator, tf.float32), reprojections, axes=[0, 0]), tf.bool) def subsample(self, indicator, batch_size, labels, scope=None): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. batch_size: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches self._positive_fraction. It cannot be None if is_static is True. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. scope: name scope. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ if len(indicator.get_shape().as_list()) != 1: raise ValueError('indicator must be 1 dimensional, got a tensor of ' 'shape %s' % indicator.get_shape()) if len(labels.get_shape().as_list()) != 1: raise ValueError('labels must be 1 dimensional, got a tensor of ' 'shape %s' % labels.get_shape()) if labels.dtype != tf.bool: raise ValueError('labels should be of type bool. Received: %s' % labels.dtype) if indicator.dtype != tf.bool: raise ValueError('indicator should be of type bool. 
Received: %s' % indicator.dtype) with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'): if self._is_static: return self._static_subsample(indicator, batch_size, labels) else: # Only sample from indicated samples negative_idx = tf.logical_not(labels) positive_idx = tf.logical_and(labels, indicator) negative_idx = tf.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if batch_size is None: max_num_pos = tf.reduce_sum(tf.to_int32(positive_idx)) else: max_num_pos = int(self._positive_fraction * batch_size) sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos) num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)) if batch_size is None: negative_positive_ratio = ( 1 - self._positive_fraction) / self._positive_fraction max_num_neg = tf.to_int32( negative_positive_ratio * tf.to_float(num_sampled_pos)) else: max_num_neg = batch_size - num_sampled_pos sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg) return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/balanced_positive_negative_sampler.py
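# A short usage sketch (shapes and values are illustrative assumptions) of the
# non-static subsample() path above: with positive_fraction=0.5 and
# batch_size=4, at most int(0.5 * 4) = 2 positives are kept and negatives
# fill the remaining slots.
import tensorflow as tf
from object_detection.core import balanced_positive_negative_sampler

labels = tf.constant([True, False, False, False, True, False, False, False])
indicator = tf.cast(tf.ones([8]), tf.bool)  # every entry is eligible
sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
    positive_fraction=0.5)
sampled = sampler.subsample(indicator, 4, labels)
with tf.Session() as sess:
  # Boolean mask over the 8 entries: 2 positives and 2 negatives are True.
  print(sess.run(sampled))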
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A freezable batch norm layer that uses Keras batch normalization.""" import tensorflow as tf class FreezableBatchNorm(tf.keras.layers.BatchNormalization): """Batch normalization layer (Ioffe and Szegedy, 2014). This is a `freezable` batch norm layer that supports setting the `training` parameter in the __init__ method rather than having to set it either via the Keras learning phase or via the `call` method parameter. This layer will forward all other parameters to the default Keras `BatchNormalization` layer. This class is necessary because Object Detection model training sometimes requires batch normalization layers to be `frozen` and used as if it were evaluation time, despite still training (and potentially using dropout layers). Like the default Keras BatchNormalization layer, this will normalize the activations of the previous layer at each batch, i.e. applies a transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1. Arguments: training: Boolean or None. If True, the batch normalization layer will normalize the input batch using the batch mean and standard deviation, and update the total moving mean and standard deviations. If False, the layer will normalize using the moving average and std. dev, without updating the learned avg and std. dev. If None, the layer will follow the keras BatchNormalization layer strategy of checking the Keras learning phase at `call` time to decide what to do. **kwargs: The keyword arguments to forward to the keras BatchNormalization layer constructor. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. References: - [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167) """ def __init__(self, training=None, **kwargs): super(FreezableBatchNorm, self).__init__(**kwargs) self._training = training def call(self, inputs, training=None): if training is None: training = self._training return super(FreezableBatchNorm, self).call(inputs, training=training)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/freezable_batch_norm.py
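# A brief usage sketch (illustrative assumption, not from the repo):
# constructing the layer with training=False pins it to inference-style
# normalization (moving statistics, no updates) regardless of the Keras
# learning phase, which is the "frozen" behavior the docstring describes.
import tensorflow as tf
from object_detection.core import freezable_batch_norm

frozen_bn = freezable_batch_norm.FreezableBatchNorm(training=False)
inputs = tf.keras.layers.Input(shape=(8,))
outputs = frozen_bn(inputs)  # always normalizes with moving mean/variance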
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.target_assigner.""" import numpy as np import tensorflow as tf from object_detection.box_coders import keypoint_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.core import box_list from object_detection.core import region_similarity_calculator from object_detection.core import standard_fields as fields from object_detection.core import target_assigner as targetassigner from object_detection.matchers import argmax_matcher from object_detection.matchers import bipartite_matcher from object_detection.utils import test_case class TargetAssignerTest(test_case.TestCase): def test_assign_agnostic(self): def graph_fn(anchor_means, groundtruth_box_corners): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0, 0.5, .5, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [1]] exp_reg_targets = [[0, 0, 0, 0], [0, 0, -1, 1], [0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEquals(cls_targets_out.dtype, np.float32) self.assertEquals(cls_weights_out.dtype, np.float32) self.assertEquals(reg_targets_out.dtype, np.float32) self.assertEquals(reg_weights_out.dtype, np.float32) def test_assign_class_agnostic_with_ignored_matches(self): # Note: test is very similar to above. The third box matched with an IOU # of 0.35, which is between the matched and unmatched threshold. This means # That like above the expected classification targets are [1, 1, 0]. # Unlike above, the third target is ignored and therefore expected # classification weights are [1, 1, 0]. 
def graph_fn(anchor_means, groundtruth_box_corners): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.3) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8], [0.0, 0.5, .9, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.9, 0.9]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [0]] exp_reg_targets = [[0, 0, 0, 0], [0, 0, -1, 1], [0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute( graph_fn, [anchor_means, groundtruth_box_corners]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEquals(cls_targets_out.dtype, np.float32) self.assertEquals(cls_weights_out.dtype, np.float32) self.assertEquals(reg_targets_out.dtype, np.float32) self.assertEquals(reg_weights_out.dtype, np.float32) def test_assign_agnostic_with_keypoints(self): def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_keypoints): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = keypoint_box_coder.KeypointBoxCoder( num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, groundtruth_keypoints) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, .9, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]], dtype=np.float32) groundtruth_keypoints = np.array( [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [1]] exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, -5], [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, -11, -7], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_keypoints]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) 
self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEquals(cls_targets_out.dtype, np.float32) self.assertEquals(cls_weights_out.dtype, np.float32) self.assertEquals(reg_targets_out.dtype, np.float32) self.assertEquals(reg_weights_out.dtype, np.float32) def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self): # Note: test is very similar to above. The third box matched with an IOU # of 0.35, which is between the matched and unmatched threshold. This means # That like above the expected classification targets are [1, 1, 0]. # Unlike above, the third target is ignored and therefore expected # classification weights are [1, 1, 0]. def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_keypoints): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = keypoint_box_coder.KeypointBoxCoder( num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0]) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) groundtruth_boxlist.add_field(fields.BoxListFields.keypoints, groundtruth_keypoints) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None) (cls_targets, cls_weights, reg_targets, reg_weights, _) = result return (cls_targets, cls_weights, reg_targets, reg_weights) anchor_means = np.array([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0], [0.0, 0.5, .9, 1.0]], dtype=np.float32) groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5], [0.45, 0.45, 0.95, 0.95]], dtype=np.float32) groundtruth_keypoints = np.array( [[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]], [[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]], dtype=np.float32) exp_cls_targets = [[1], [1], [0]] exp_cls_weights = [[1], [1], [1]] exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13, -5], [-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11, -11, -7], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] exp_reg_weights = [1, 1, 0] (cls_targets_out, cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_keypoints]) self.assertAllClose(cls_targets_out, exp_cls_targets) self.assertAllClose(cls_weights_out, exp_cls_weights) self.assertAllClose(reg_targets_out, exp_reg_targets) self.assertAllClose(reg_weights_out, exp_reg_weights) self.assertEquals(cls_targets_out.dtype, np.float32) self.assertEquals(cls_weights_out.dtype, np.float32) self.assertEquals(reg_targets_out.dtype, np.float32) self.assertEquals(reg_weights_out.dtype, np.float32) def test_assign_multiclass(self): def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels): similarity_calc = region_similarity_calculator.IouSimilarity() matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5, unmatched_threshold=0.5) box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32) target_assigner = targetassigner.TargetAssigner( similarity_calc, matcher, box_coder) anchors_boxlist = box_list.BoxList(anchor_means) groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners) result = target_assigner.assign( anchors_boxlist, groundtruth_boxlist, groundtruth_labels, unmatched_class_label=unmatched_class_label) (cls_targets, cls_weights, 
       reg_targets, reg_weights, _) = result
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
                             [0.5, 0.5, 1.0, 0.8],
                             [0, 0.5, .5, 1.0],
                             [.75, 0, 1.0, .25]], dtype=np.float32)
    groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.9, 0.9],
                                        [.75, 0, .95, .27]], dtype=np.float32)
    groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
                                   [0, 0, 0, 0, 0, 1, 0],
                                   [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)

    exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0, 1, 0],
                       [1, 0, 0, 0, 0, 0, 0],
                       [0, 0, 0, 1, 0, 0, 0]]
    exp_cls_weights = [[1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1]]
    exp_reg_targets = [[0, 0, 0, 0],
                       [0, 0, -1, 1],
                       [0, 0, 0, 0],
                       [0, 0, -.5, .2]]
    exp_reg_weights = [1, 1, 0, 1]

    (cls_targets_out,
     cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
         graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)
    self.assertEquals(cls_targets_out.dtype, np.float32)
    self.assertEquals(cls_weights_out.dtype, np.float32)
    self.assertEquals(reg_targets_out.dtype, np.float32)
    self.assertEquals(reg_weights_out.dtype, np.float32)

  def test_assign_multiclass_with_groundtruth_weights(self):

    def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels,
                 groundtruth_weights):
      similarity_calc = region_similarity_calculator.IouSimilarity()
      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
                                             unmatched_threshold=0.5)
      box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
      unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
      target_assigner = targetassigner.TargetAssigner(
          similarity_calc, matcher, box_coder)
      anchors_boxlist = box_list.BoxList(anchor_means)
      groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
      result = target_assigner.assign(
          anchors_boxlist,
          groundtruth_boxlist,
          groundtruth_labels,
          unmatched_class_label=unmatched_class_label,
          groundtruth_weights=groundtruth_weights)
      (_, cls_weights, _, reg_weights, _) = result
      return (cls_weights, reg_weights)

    anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
                             [0.5, 0.5, 1.0, 0.8],
                             [0, 0.5, .5, 1.0],
                             [.75, 0, 1.0, .25]], dtype=np.float32)
    groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.9, 0.9],
                                        [.75, 0, .95, .27]], dtype=np.float32)
    groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
                                   [0, 0, 0, 0, 0, 1, 0],
                                   [0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
    groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32)

    # background class gets weight of 1.
    exp_cls_weights = [[0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3],
                       [0, 0, 0, 0, 0, 0, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]
    exp_reg_weights = [0.3, 0., 0., 0.5]  # background class gets weight of 0.

    (cls_weights_out, reg_weights_out) = self.execute(graph_fn, [
        anchor_means, groundtruth_box_corners, groundtruth_labels,
        groundtruth_weights
    ])
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_assign_multidimensional_class_targets(self):

    def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
      similarity_calc = region_similarity_calculator.IouSimilarity()
      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
                                             unmatched_threshold=0.5)
      box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
      unmatched_class_label = tf.constant([[0, 0], [0, 0]], tf.float32)
      target_assigner = targetassigner.TargetAssigner(
          similarity_calc, matcher, box_coder)
      anchors_boxlist = box_list.BoxList(anchor_means)
      groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
      result = target_assigner.assign(
          anchors_boxlist,
          groundtruth_boxlist,
          groundtruth_labels,
          unmatched_class_label=unmatched_class_label)
      (cls_targets, cls_weights, reg_targets, reg_weights, _) = result
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
                             [0.5, 0.5, 1.0, 0.8],
                             [0, 0.5, .5, 1.0],
                             [.75, 0, 1.0, .25]], dtype=np.float32)
    groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
                                        [0.5, 0.5, 0.9, 0.9],
                                        [.75, 0, .95, .27]], dtype=np.float32)
    groundtruth_labels = np.array([[[0, 1], [1, 0]],
                                   [[1, 0], [0, 1]],
                                   [[0, 1], [1, .5]]], np.float32)

    exp_cls_targets = [[[0, 1], [1, 0]],
                       [[1, 0], [0, 1]],
                       [[0, 0], [0, 0]],
                       [[0, 1], [1, .5]]]
    exp_cls_weights = [[[1, 1], [1, 1]],
                       [[1, 1], [1, 1]],
                       [[1, 1], [1, 1]],
                       [[1, 1], [1, 1]]]
    exp_reg_targets = [[0, 0, 0, 0],
                       [0, 0, -1, 1],
                       [0, 0, 0, 0],
                       [0, 0, -.5, .2]]
    exp_reg_weights = [1, 1, 0, 1]

    (cls_targets_out,
     cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
         graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)
    self.assertEquals(cls_targets_out.dtype, np.float32)
    self.assertEquals(cls_weights_out.dtype, np.float32)
    self.assertEquals(reg_targets_out.dtype, np.float32)
    self.assertEquals(reg_weights_out.dtype, np.float32)

  def test_assign_empty_groundtruth(self):

    def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
      similarity_calc = region_similarity_calculator.IouSimilarity()
      matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
                                             unmatched_threshold=0.5)
      box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
      unmatched_class_label = tf.constant([0, 0, 0], tf.float32)
      anchors_boxlist = box_list.BoxList(anchor_means)
      groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
      target_assigner = targetassigner.TargetAssigner(
          similarity_calc, matcher, box_coder)
      result = target_assigner.assign(
          anchors_boxlist,
          groundtruth_boxlist,
          groundtruth_labels,
          unmatched_class_label=unmatched_class_label)
      (cls_targets, cls_weights, reg_targets, reg_weights, _) = result
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
    groundtruth_labels = np.zeros((0, 3), dtype=np.float32)
    anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
                             [0.5, 0.5, 1.0, 0.8],
                             [0, 0.5, .5, 1.0],
                             [.75, 0, 1.0, .25]], dtype=np.float32)
    exp_cls_targets = [[0, 0, 0],
                       [0, 0, 0],
                       [0, 0, 0],
                       [0, 0, 0]]
    exp_cls_weights = [[1, 1, 1],
                       [1, 1, 1],
                       [1, 1, 1],
                       [1, 1, 1]]
    exp_reg_targets = [[0, 0, 0, 0],
                       [0, 0, 0, 0],
                       [0, 0, 0, 0],
                       [0, 0, 0, 0]]
    exp_reg_weights = [0, 0, 0, 0]

    (cls_targets_out,
     cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
         graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)
    self.assertEquals(cls_targets_out.dtype, np.float32)
    self.assertEquals(cls_weights_out.dtype, np.float32)
    self.assertEquals(reg_targets_out.dtype, np.float32)
    self.assertEquals(reg_weights_out.dtype, np.float32)

  def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self):
    similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
    matcher = bipartite_matcher.GreedyBipartiteMatcher()
    box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
    unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
    target_assigner = targetassigner.TargetAssigner(
        similarity_calc, matcher, box_coder)

    prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5],
                               [0.5, 0.5, 1.0, 0.8],
                               [0, 0.5, .5, 1.0],
                               [.75, 0, 1.0, .25]])
    priors = box_list.BoxList(prior_means)

    box_corners = [[0.0, 0.0, 0.5, 0.5],
                   [0.0, 0.0, 0.5, 0.8],
                   [0.5, 0.5, 0.9, 0.9],
                   [.75, 0, .95, .27]]
    boxes = box_list.BoxList(tf.constant(box_corners))

    groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0],
                                      [0, 0, 0, 0, 0, 1, 0],
                                      [0, 0, 0, 1, 0, 0, 0]], tf.float32)
    with self.assertRaisesRegexp(ValueError, 'Unequal shapes'):
      target_assigner.assign(
          priors,
          boxes,
          groundtruth_labels,
          unmatched_class_label=unmatched_class_label)

  def test_raises_error_on_invalid_groundtruth_labels(self):
    similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
    matcher = bipartite_matcher.GreedyBipartiteMatcher()
    box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=1.0)
    unmatched_class_label = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32)
    target_assigner = targetassigner.TargetAssigner(
        similarity_calc, matcher, box_coder)

    prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]])
    priors = box_list.BoxList(prior_means)

    box_corners = [[0.0, 0.0, 0.5, 0.5],
                   [0.5, 0.5, 0.9, 0.9],
                   [.75, 0, .95, .27]]
    boxes = box_list.BoxList(tf.constant(box_corners))
    groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32)

    with self.assertRaises(ValueError):
      target_assigner.assign(
          priors,
          boxes,
          groundtruth_labels,
          unmatched_class_label=unmatched_class_label)


class BatchTargetAssignerTest(test_case.TestCase):

  def _get_target_assigner(self):
    similarity_calc = region_similarity_calculator.IouSimilarity()
    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
                                           unmatched_threshold=0.5)
    box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
    return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)

  def test_batch_assign_targets(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_targets = [None, None]
      anchors_boxlist = box_list.BoxList(anchor_means)
      agnostic_target_assigner = self._get_target_assigner()
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_targets(
           agnostic_target_assigner, anchors_boxlist, gt_box_batch,
           gt_class_targets)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)

    exp_cls_targets = [[[1], [0], [0], [0]],
                       [[0], [1], [1], [0]]]
    exp_cls_weights = [[[1], [1], [1], [1]],
                       [[1], [1], [1], [1]]]
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0.15789001, -0.01500003, 0.57889998, -1.15799987],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 1, 0]]

    (cls_targets_out,
     cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
         graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_multiclass_targets(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
                 class_targets1, class_targets2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_targets = [class_targets1, class_targets2]
      anchors_boxlist = box_list.BoxList(anchor_means)
      multiclass_target_assigner = self._get_target_assigner()
      num_classes = 3
      unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_targets(
           multiclass_target_assigner, anchors_boxlist, gt_box_batch,
           gt_class_targets, unmatched_class_label)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
    class_targets2 = np.array([[0, 0, 0, 1],
                               [0, 0, 1, 0]], dtype=np.float32)

    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)
    exp_cls_targets = [[[0, 1, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0]],
                       [[1, 0, 0, 0],
                        [0, 0, 0, 1],
                        [0, 0, 1, 0],
                        [1, 0, 0, 0]]]
    exp_cls_weights = [[[1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1]],
                       [[1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1]]]
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0.15789001, -0.01500003, 0.57889998, -1.15799987],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 1, 0]]

    (cls_targets_out, cls_weights_out, reg_targets_out,
     reg_weights_out) = self.execute(graph_fn, [
         anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
         class_targets1, class_targets2
     ])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_multiclass_targets_with_padded_groundtruth(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
                 class_targets1, class_targets2, groundtruth_weights1,
                 groundtruth_weights2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_targets = [class_targets1, class_targets2]
      gt_weights = [groundtruth_weights1, groundtruth_weights2]
      anchors_boxlist = box_list.BoxList(anchor_means)
      multiclass_target_assigner = self._get_target_assigner()
      num_classes = 3
      unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_targets(
           multiclass_target_assigner, anchors_boxlist, gt_box_batch,
           gt_class_targets, unmatched_class_label, gt_weights)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
                                     [0., 0., 0., 0.]], dtype=np.float32)
    groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842],
                                     [0, 0, 0, 0]],
                                    dtype=np.float32)
    groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
    class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
    class_targets2 = np.array([[0, 0, 0, 1],
                               [0, 0, 1, 0],
                               [0, 0, 0, 0]], dtype=np.float32)

    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)

    exp_cls_targets = [[[0, 1, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0]],
                       [[1, 0, 0, 0],
                        [0, 0, 0, 1],
                        [0, 0, 1, 0],
                        [1, 0, 0, 0]]]
    exp_cls_weights = [[[1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1]],
                       [[1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1],
                        [1, 1, 1, 1]]]
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0.15789001, -0.01500003, 0.57889998, -1.15799987],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 1, 0]]

    (cls_targets_out, cls_weights_out, reg_targets_out,
     reg_weights_out) = self.execute(graph_fn, [
         anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
         class_targets1, class_targets2, groundtruth_weights1,
         groundtruth_weights2
     ])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_multidimensional_targets(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
                 class_targets1, class_targets2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_targets = [class_targets1, class_targets2]
      anchors_boxlist = box_list.BoxList(anchor_means)
      multiclass_target_assigner = self._get_target_assigner()
      target_dimensions = (2, 3)
      unmatched_class_label = tf.constant(np.zeros(target_dimensions),
                                          tf.float32)
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_targets(
           multiclass_target_assigner, anchors_boxlist, gt_box_batch,
           gt_class_targets, unmatched_class_label)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
    class_targets2 = np.array([[0, 0, 0, 1],
                               [0, 0, 1, 0]], dtype=np.float32)
    class_targets1 = np.array([[[0, 1, 1],
                                [1, 1, 0]]], dtype=np.float32)
    class_targets2 = np.array([[[0, 1, 1],
                                [1, 1, 0]],
                               [[0, 0, 1],
                                [0, 0, 1]]], dtype=np.float32)

    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)

    exp_cls_targets = [[[[0., 1., 1.], [1., 1., 0.]],
                        [[0., 0., 0.], [0., 0., 0.]],
                        [[0., 0., 0.], [0., 0., 0.]],
                        [[0., 0., 0.], [0., 0., 0.]]],
                       [[[0., 0., 0.], [0., 0., 0.]],
                        [[0., 1., 1.], [1., 1., 0.]],
                        [[0., 0., 1.], [0., 0., 1.]],
                        [[0., 0., 0.], [0., 0., 0.]]]]
    exp_cls_weights = [[[[1., 1., 1.], [1., 1., 1.]],
                        [[1., 1., 1.], [1., 1., 1.]],
                        [[1., 1., 1.], [1., 1., 1.]],
                        [[1., 1., 1.], [1., 1., 1.]]],
                       [[[1., 1., 1.], [1., 1., 1.]],
                        [[1., 1., 1.], [1., 1., 1.]],
                        [[1., 1., 1.], [1., 1., 1.]],
                        [[1., 1., 1.], [1., 1., 1.]]]]
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0.15789001, -0.01500003, 0.57889998, -1.15799987],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 1, 0]]

    (cls_targets_out, cls_weights_out, reg_targets_out,
     reg_weights_out) = self.execute(graph_fn, [
         anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
         class_targets1, class_targets2
     ])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_empty_groundtruth(self):

    def graph_fn(anchor_means, groundtruth_box_corners, gt_class_targets):
      groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
      gt_box_batch = [groundtruth_boxlist]
      gt_class_targets_batch = [gt_class_targets]
      anchors_boxlist = box_list.BoxList(anchor_means)
      multiclass_target_assigner = self._get_target_assigner()
      num_classes = 3
      unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_targets(
           multiclass_target_assigner, anchors_boxlist, gt_box_batch,
           gt_class_targets_batch, unmatched_class_label)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1]], dtype=np.float32)
    exp_cls_targets = [[[1, 0, 0, 0],
                        [1, 0, 0, 0]]]
    exp_cls_weights = [[[1, 1, 1, 1],
                        [1, 1, 1, 1]]]
    exp_reg_targets = [[[0, 0, 0, 0],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[0, 0]]
    num_classes = 3
    pad = 1
    gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32)

    (cls_targets_out,
     cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
         graph_fn, [anchor_means, groundtruth_box_corners, gt_class_targets])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)


class BatchTargetAssignConfidencesTest(test_case.TestCase):

  def _get_target_assigner(self):
    similarity_calc = region_similarity_calculator.IouSimilarity()
    matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
                                           unmatched_threshold=0.5)
    box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
    return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)

  def test_batch_assign_empty_groundtruth(self):

    def graph_fn(anchor_means, groundtruth_box_corners, gt_class_confidences):
      groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
      gt_box_batch = [groundtruth_boxlist]
      gt_class_confidences_batch = [gt_class_confidences]
      anchors_boxlist = box_list.BoxList(anchor_means)

      num_classes = 3
      implicit_class_weight = 0.5
      unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
      multiclass_target_assigner = self._get_target_assigner()
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_confidences(
           multiclass_target_assigner,
           anchors_boxlist,
           gt_box_batch,
           gt_class_confidences_batch,
           unmatched_class_label=unmatched_class_label,
           include_background_class=True,
           implicit_class_weight=implicit_class_weight)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1]], dtype=np.float32)
    num_classes = 3
    pad = 1
    gt_class_confidences = np.zeros((0, num_classes + pad), dtype=np.float32)

    exp_cls_targets = [[[1, 0, 0, 0],
                        [1, 0, 0, 0]]]
    exp_cls_weights = [[[0.5, 0.5, 0.5, 0.5],
                        [0.5, 0.5, 0.5, 0.5]]]
    exp_reg_targets = [[[0, 0, 0, 0],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[0, 0]]

    (cls_targets_out,
     cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
         graph_fn,
         [anchor_means, groundtruth_box_corners, gt_class_confidences])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_confidences_agnostic(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_confidences_batch = [None, None]
      anchors_boxlist = box_list.BoxList(anchor_means)
      agnostic_target_assigner = self._get_target_assigner()
      implicit_class_weight = 0.5
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_confidences(
           agnostic_target_assigner,
           anchors_boxlist,
           gt_box_batch,
           gt_class_confidences_batch,
           include_background_class=False,
           implicit_class_weight=implicit_class_weight)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)

    exp_cls_targets = [[[1], [0], [0], [0]],
                       [[0], [1], [1], [0]]]
    exp_cls_weights = [[[1], [0.5], [0.5], [0.5]],
                       [[0.5], [1], [1], [0.5]]]
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0.15789001, -0.01500003, 0.57889998, -1.15799987],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 1, 0]]

    (cls_targets_out,
     cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
         graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_confidences_multiclass(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
                 class_targets1, class_targets2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_confidences_batch = [class_targets1, class_targets2]
      anchors_boxlist = box_list.BoxList(anchor_means)
      multiclass_target_assigner = self._get_target_assigner()
      num_classes = 3
      implicit_class_weight = 0.5
      unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_confidences(
           multiclass_target_assigner,
           anchors_boxlist,
           gt_box_batch,
           gt_class_confidences_batch,
           unmatched_class_label=unmatched_class_label,
           include_background_class=True,
           implicit_class_weight=implicit_class_weight)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
    class_targets2 = np.array([[0, 0, 0, 1],
                               [0, 0, -1, 0]], dtype=np.float32)

    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)
    exp_cls_targets = [[[0, 1, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0]],
                       [[1, 0, 0, 0],
                        [0, 0, 0, 1],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0]]]
    exp_cls_weights = [[[1, 1, 0.5, 0.5],
                        [0.5, 0.5, 0.5, 0.5],
                        [0.5, 0.5, 0.5, 0.5],
                        [0.5, 0.5, 0.5, 0.5]],
                       [[0.5, 0.5, 0.5, 0.5],
                        [1, 0.5, 0.5, 1],
                        [0.5, 0.5, 1, 0.5],
                        [0.5, 0.5, 0.5, 0.5]]]
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 0, 0]]

    (cls_targets_out, cls_weights_out, reg_targets_out,
     reg_weights_out) = self.execute(graph_fn, [
         anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
         class_targets1, class_targets2
     ])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_confidences_multiclass_with_padded_groundtruth(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
                 class_targets1, class_targets2, groundtruth_weights1,
                 groundtruth_weights2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_confidences_batch = [class_targets1, class_targets2]
      gt_weights = [groundtruth_weights1, groundtruth_weights2]
      anchors_boxlist = box_list.BoxList(anchor_means)
      multiclass_target_assigner = self._get_target_assigner()
      num_classes = 3
      unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
      implicit_class_weight = 0.5
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_confidences(
           multiclass_target_assigner,
           anchors_boxlist,
           gt_box_batch,
           gt_class_confidences_batch,
           gt_weights,
           unmatched_class_label=unmatched_class_label,
           include_background_class=True,
           implicit_class_weight=implicit_class_weight)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
                                     [0., 0., 0., 0.]], dtype=np.float32)
    groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842],
                                     [0, 0, 0, 0]],
                                    dtype=np.float32)
    groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
    class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
    class_targets2 = np.array([[0, 0, 0, 1],
                               [0, 0, -1, 0],
                               [0, 0, 0, 0]], dtype=np.float32)
    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)

    exp_cls_targets = [[[0, 1, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0]],
                       [[1, 0, 0, 0],
                        [0, 0, 0, 1],
                        [1, 0, 0, 0],
                        [1, 0, 0, 0]]]
    exp_cls_weights = [[[1, 1, 0.5, 0.5],
                        [0.5, 0.5, 0.5, 0.5],
                        [0.5, 0.5, 0.5, 0.5],
                        [0.5, 0.5, 0.5, 0.5]],
                       [[0.5, 0.5, 0.5, 0.5],
                        [1, 0.5, 0.5, 1],
                        [0.5, 0.5, 1, 0.5],
                        [0.5, 0.5, 0.5, 0.5]]]
    exp_reg_targets = [[[0, 0, -0.5, -0.5],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0,],
                        [0, 0, 0, 0,],],
                       [[0, 0, 0, 0,],
                        [0, 0.01231521, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 0, 0]]]
    exp_reg_weights = [[1, 0, 0, 0],
                       [0, 1, 0, 0]]

    (cls_targets_out, cls_weights_out, reg_targets_out,
     reg_weights_out) = self.execute(graph_fn, [
         anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
         class_targets1, class_targets2, groundtruth_weights1,
         groundtruth_weights2
     ])
    self.assertAllClose(cls_targets_out, exp_cls_targets)
    self.assertAllClose(cls_weights_out, exp_cls_weights)
    self.assertAllClose(reg_targets_out, exp_reg_targets)
    self.assertAllClose(reg_weights_out, exp_reg_weights)

  def test_batch_assign_confidences_multidimensional(self):

    def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
                 class_targets1, class_targets2):
      box_list1 = box_list.BoxList(groundtruth_boxlist1)
      box_list2 = box_list.BoxList(groundtruth_boxlist2)
      gt_box_batch = [box_list1, box_list2]
      gt_class_confidences_batch = [class_targets1, class_targets2]
      anchors_boxlist = box_list.BoxList(anchor_means)
      multiclass_target_assigner = self._get_target_assigner()
      target_dimensions = (2, 3)
      unmatched_class_label = tf.constant(np.zeros(target_dimensions),
                                          tf.float32)
      implicit_class_weight = 0.5
      (cls_targets, cls_weights, reg_targets, reg_weights,
       _) = targetassigner.batch_assign_confidences(
           multiclass_target_assigner,
           anchors_boxlist,
           gt_box_batch,
           gt_class_confidences_batch,
           unmatched_class_label=unmatched_class_label,
           include_background_class=True,
           implicit_class_weight=implicit_class_weight)
      return (cls_targets, cls_weights, reg_targets, reg_weights)

    groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
    groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
                                     [0.015789, 0.0985, 0.55789, 0.3842]],
                                    dtype=np.float32)
    class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
    class_targets2 = np.array([[0, 0, 0, 1],
                               [0, 0, 1, 0]], dtype=np.float32)
    class_targets1 = np.array([[[0, 1, 1],
                                [1, 1, 0]]], dtype=np.float32)
    class_targets2 = np.array([[[0, 1, 1],
                                [1, 1, 0]],
                               [[0, 0, 1],
                                [0, 0, 1]]], dtype=np.float32)

    anchor_means = np.array([[0, 0, .25, .25],
                             [0, .25, 1, 1],
                             [0, .1, .5, .5],
                             [.75, .75, 1, 1]], dtype=np.float32)

    with self.assertRaises(ValueError):
      _, _, _, _ = self.execute(graph_fn, [
          anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
          class_targets1, class_targets2
      ])


class CreateTargetAssignerTest(tf.test.TestCase):

  def test_create_target_assigner(self):
    """Tests that named constructor gives working target assigners.

    TODO(rathodv): Make this test more general.
    """
    corners = [[0.0, 0.0, 1.0, 1.0]]
    groundtruth = box_list.BoxList(tf.constant(corners))

    priors = box_list.BoxList(tf.constant(corners))
    multibox_ta = (targetassigner
                   .create_target_assigner('Multibox', stage='proposal'))
    multibox_ta.assign(priors, groundtruth)
    # No tests on output, as that may vary arbitrarily as new target assigners
    # are added. As long as it is constructed correctly and runs without
    # errors, tests on the individual assigners cover correctness of the
    # assignments.

    anchors = box_list.BoxList(tf.constant(corners))
    faster_rcnn_proposals_ta = (targetassigner
                                .create_target_assigner('FasterRCNN',
                                                        stage='proposal'))
    faster_rcnn_proposals_ta.assign(anchors, groundtruth)

    fast_rcnn_ta = (targetassigner
                    .create_target_assigner('FastRCNN'))
    fast_rcnn_ta.assign(anchors, groundtruth)

    faster_rcnn_detection_ta = (targetassigner
                                .create_target_assigner('FasterRCNN',
                                                        stage='detection'))
    faster_rcnn_detection_ta.assign(anchors, groundtruth)

    with self.assertRaises(ValueError):
      targetassigner.create_target_assigner('InvalidDetector',
                                            stage='invalid_stage')


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/target_assigner_test.py
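The tests above exercise TargetAssigner end to end. As a quick orientation, the following minimal sketch shows the same construction pattern outside a test harness. It is illustrative only: the import paths are assumptions inferred from this repository's layout, and the anchor/groundtruth values are toy numbers.

# Illustrative sketch (not part of the repository files): building and calling
# a TargetAssigner the way the tests above do. Import paths are assumed from
# the object_detection package layout; verify against your checkout.
import tensorflow as tf

from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator
from object_detection.core import target_assigner as targetassigner
from object_detection.matchers import argmax_matcher

similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
                                       unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
assigner = targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)

# Toy anchors and a single groundtruth box; labels are one-hot with the
# background class in column 0, matching the convention in the tests above.
anchors = box_list.BoxList(
    tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]], tf.float32))
groundtruth = box_list.BoxList(
    tf.constant([[0.0, 0.0, 0.5, 0.5]], tf.float32))
labels = tf.constant([[0, 1, 0]], tf.float32)

# assign() returns (cls_targets, cls_weights, reg_targets, reg_weights, match).
cls_targets, cls_weights, reg_targets, reg_weights, _ = assigner.assign(
    anchors, groundtruth, labels,
    unmatched_class_label=tf.constant([1, 0, 0], tf.float32))
with tf.Session() as sess:
  print(sess.run([cls_targets, reg_weights]))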
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Bounding Box List definition.

BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).

Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
  (i.e., not None) at graph construction time.

Some other notes:
  * Following tensorflow conventions, we use height, width ordering,
    and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
  * Tensors are always provided as (flat) [N, 4] tensors.
"""

import tensorflow as tf


class BoxList(object):
  """Box collection."""

  def __init__(self, boxes):
    """Constructs box collection.

    Args:
      boxes: a tensor of shape [N, 4] representing box corners

    Raises:
      ValueError: if invalid dimensions for bbox data or if bbox data is not
        in float32 format.
    """
    if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
      raise ValueError('Invalid dimensions for box data.')
    if boxes.dtype != tf.float32:
      raise ValueError('Invalid tensor type: should be tf.float32')
    self.data = {'boxes': boxes}

  def num_boxes(self):
    """Returns number of boxes held in collection.

    Returns:
      a tensor representing the number of boxes held in the collection.
    """
    return tf.shape(self.data['boxes'])[0]

  def num_boxes_static(self):
    """Returns number of boxes held in collection.

    This number is inferred at graph construction time rather than run-time.

    Returns:
      Number of boxes held in collection (integer) or None if this is not
        inferrable at graph construction time.
    """
    return self.data['boxes'].get_shape()[0].value

  def get_all_fields(self):
    """Returns all fields."""
    return self.data.keys()

  def get_extra_fields(self):
    """Returns all non-box fields (i.e., everything not named 'boxes')."""
    return [k for k in self.data.keys() if k != 'boxes']

  def add_field(self, field, field_data):
    """Add field to box list.

    This method can be used to add related box data such as
    weights/labels, etc.

    Args:
      field: a string key to access the data via `get`
      field_data: a tensor containing the data to store in the BoxList
    """
    self.data[field] = field_data

  def has_field(self, field):
    return field in self.data

  def get(self):
    """Convenience function for accessing box coordinates.

    Returns:
      a tensor with shape [N, 4] representing box coordinates.
    """
    return self.get_field('boxes')

  def set(self, boxes):
    """Convenience function for setting box coordinates.

    Args:
      boxes: a tensor of shape [N, 4] representing box corners

    Raises:
      ValueError: if invalid dimensions for bbox data
    """
    if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
      raise ValueError('Invalid dimensions for box data.')
    self.data['boxes'] = boxes

  def get_field(self, field):
    """Accesses a box collection and associated fields.

    This function returns the specified field of the box collection; the
    'boxes' field holds the box coordinates themselves.

    Args:
      field: a string parameter used to specify a related field to be accessed.

    Returns:
      a tensor representing the box collection or an associated field.

    Raises:
      ValueError: if invalid field
    """
    if not self.has_field(field):
      raise ValueError('field ' + str(field) + ' does not exist')
    return self.data[field]

  def set_field(self, field, value):
    """Sets the value of a field.

    Updates the field of a box_list with a given value.

    Args:
      field: (string) name of the field to set value.
      value: the value to assign to the field.

    Raises:
      ValueError: if the box_list does not have specified field.
    """
    if not self.has_field(field):
      raise ValueError('field %s does not exist' % field)
    self.data[field] = value

  def get_center_coordinates_and_sizes(self, scope=None):
    """Computes the center coordinates, height and width of the boxes.

    Args:
      scope: name scope of the function.

    Returns:
      a list of 4 1-D tensors [ycenter, xcenter, height, width].
    """
    with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
      box_corners = self.get()
      ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
      width = xmax - xmin
      height = ymax - ymin
      ycenter = ymin + height / 2.
      xcenter = xmin + width / 2.
      return [ycenter, xcenter, height, width]

  def transpose_coordinates(self, scope=None):
    """Transpose the coordinate representation in a boxlist.

    Args:
      scope: name scope of the function.
    """
    with tf.name_scope(scope, 'transpose_coordinates'):
      y_min, x_min, y_max, x_max = tf.split(
          value=self.get(), num_or_size_splits=4, axis=1)
      self.set(tf.concat([x_min, y_min, x_max, y_max], 1))

  def as_tensor_dict(self, fields=None):
    """Retrieves specified fields as a dictionary of tensors.

    Args:
      fields: (optional) list of fields to return in the dictionary.
        If None (default), all fields are returned.

    Returns:
      tensor_dict: A dictionary of tensors specified by fields.

    Raises:
      ValueError: if specified field is not contained in boxlist.
    """
    tensor_dict = {}
    if fields is None:
      fields = self.get_all_fields()
    for field in fields:
      if not self.has_field(field):
        raise ValueError('boxlist must contain all specified fields')
      tensor_dict[field] = self.get_field(field)
    return tensor_dict
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/box_list.py
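A short illustrative snippet (not part of the file above) showing typical BoxList usage in this TF1-style codebase. The box coordinates and the 'scores' field are made-up toy values.

# Illustrative sketch: construct a BoxList, attach an extra field, and read
# back derived quantities. Assumes the object_detection package is importable.
import tensorflow as tf

from object_detection.core import box_list

boxes = tf.constant([[0.0, 0.0, 0.5, 0.5],
                     [0.25, 0.25, 0.75, 1.0]], tf.float32)
boxlist = box_list.BoxList(boxes)
boxlist.add_field('scores', tf.constant([0.9, 0.4]))

# get_center_coordinates_and_sizes returns [ycenter, xcenter, height, width].
ycenter, xcenter, height, width = boxlist.get_center_coordinates_and_sizes()
with tf.Session() as sess:
  num, centers = sess.run([boxlist.num_boxes(), ycenter])
  print(num, centers)  # 2, [0.25, 0.5]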
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Post-processing operations on detected boxes."""

import numpy as np
import tensorflow as tf

from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils


def multiclass_non_max_suppression(boxes,
                                   scores,
                                   score_thresh,
                                   iou_thresh,
                                   max_size_per_class,
                                   max_total_size=0,
                                   clip_window=None,
                                   change_coordinate_frame=False,
                                   masks=None,
                                   boundaries=None,
                                   pad_to_max_output_size=False,
                                   additional_fields=None,
                                   scope=None):
  """Multi-class version of non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes.  It operates independently for each class for
  which scores are provided (via the scores field of the input box_list),
  pruning boxes with score less than a provided threshold prior to
  applying NMS.

  Please note that this operation is performed on *all* classes, therefore any
  background classes should be removed prior to calling this function.

  Selected boxes are guaranteed to be sorted in decreasing order by score (but
  the sort is not guaranteed to be stable).

  Args:
    boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be
      either number of classes or 1 depending on whether a separate box is
      predicted per class.
    scores: A [k, num_classes] float32 tensor containing the scores for each of
      the k detections. The scores have to be non-negative when
      pad_to_max_output_size is True.
    score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
      with previously selected boxes are removed).
    max_size_per_class: maximum number of retained boxes per class.
    max_total_size: maximum number of boxes retained over all classes. By
      default returns all boxes retained after capping boxes per class.
    clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]
      representing the window to clip and normalize boxes to before performing
      non-max suppression.
    change_coordinate_frame: Whether to normalize coordinates after clipping
      relative to clip_window (this can only be set to True if a clip_window
      is provided)
    masks: (optional) a [k, q, mask_height, mask_width] float32 tensor
      containing box masks. `q` can be either number of classes or 1 depending
      on whether a separate mask is predicted per class.
    boundaries: (optional) a [k, q, boundary_height, boundary_width] float32
      tensor containing box boundaries. `q` can be either number of classes or
      1 depending on whether a separate boundary is predicted per class.
    pad_to_max_output_size: If true, the output nmsed boxes are padded to be of
      length `max_size_per_class`. Defaults to false.
    additional_fields: (optional) If not None, a dictionary that maps keys to
      tensors whose first dimensions are all of size `k`. After non-maximum
      suppression, all tensors corresponding to the selected boxes will be
      added to resulting BoxList.
    scope: name scope.

  Returns:
    A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a
      BoxList holds M boxes with a rank-1 scores field representing
      corresponding scores for each box with scores sorted in decreasing order
      and a rank-1 classes field representing a class label for each box. The
      num_valid_nms_boxes is a 0-D integer tensor representing the number of
      valid elements in `BoxList`, with the valid elements appearing first.

  Raises:
    ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not
      have a valid scores field.
  """
  if not 0 <= iou_thresh <= 1.0:
    raise ValueError('iou_thresh must be between 0 and 1')
  if scores.shape.ndims != 2:
    raise ValueError('scores field must be of rank 2')
  if scores.shape[1].value is None:
    raise ValueError('scores must have statically defined second '
                     'dimension')
  if boxes.shape.ndims != 3:
    raise ValueError('boxes must be of rank 3.')
  if not (boxes.shape[1].value == scores.shape[1].value or
          boxes.shape[1].value == 1):
    raise ValueError('second dimension of boxes must be either 1 or equal '
                     'to the second dimension of scores')
  if boxes.shape[2].value != 4:
    raise ValueError('last dimension of boxes must be of size 4.')
  if change_coordinate_frame and clip_window is None:
    raise ValueError('if change_coordinate_frame is True, then a clip_window '
                     'must be specified.')

  with tf.name_scope(scope, 'MultiClassNonMaxSuppression'):
    num_scores = tf.shape(scores)[0]
    num_classes = scores.get_shape()[1]

    selected_boxes_list = []
    num_valid_nms_boxes_cumulative = tf.constant(0)
    per_class_boxes_list = tf.unstack(boxes, axis=1)
    if masks is not None:
      per_class_masks_list = tf.unstack(masks, axis=1)
    if boundaries is not None:
      per_class_boundaries_list = tf.unstack(boundaries, axis=1)
    boxes_ids = (range(num_classes) if len(per_class_boxes_list) > 1 else
                 [0] * num_classes.value)
    for class_idx, boxes_idx in zip(range(num_classes), boxes_ids):
      per_class_boxes = per_class_boxes_list[boxes_idx]
      boxlist_and_class_scores = box_list.BoxList(per_class_boxes)
      class_scores = tf.reshape(
          tf.slice(scores, [0, class_idx], tf.stack([num_scores, 1])), [-1])

      boxlist_and_class_scores.add_field(fields.BoxListFields.scores,
                                         class_scores)
      if masks is not None:
        per_class_masks = per_class_masks_list[boxes_idx]
        boxlist_and_class_scores.add_field(fields.BoxListFields.masks,
                                           per_class_masks)
      if boundaries is not None:
        per_class_boundaries = per_class_boundaries_list[boxes_idx]
        boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries,
                                           per_class_boundaries)
      if additional_fields is not None:
        for key, tensor in additional_fields.items():
          boxlist_and_class_scores.add_field(key, tensor)

      if pad_to_max_output_size:
        max_selection_size = max_size_per_class
        with tf.device('/CPU:0'):
          selected_indices, num_valid_nms_boxes = (
              tf.image.non_max_suppression_padded(
                  boxlist_and_class_scores.get(),
                  boxlist_and_class_scores.get_field(
                      fields.BoxListFields.scores),
                  max_selection_size,
                  iou_threshold=iou_thresh,
                  score_threshold=score_thresh,
                  pad_to_max_output_size=True))
      else:
        max_selection_size = tf.minimum(max_size_per_class,
                                        boxlist_and_class_scores.num_boxes())
        with tf.device('/CPU:0'):
          selected_indices = tf.image.non_max_suppression(
              boxlist_and_class_scores.get(),
              boxlist_and_class_scores.get_field(fields.BoxListFields.scores),
              max_selection_size,
              iou_threshold=iou_thresh,
              score_threshold=score_thresh)
        num_valid_nms_boxes = tf.shape(selected_indices)[0]
        selected_indices = tf.concat(
            [selected_indices,
             tf.zeros(max_selection_size - num_valid_nms_boxes, tf.int32)], 0)
      nms_result = box_list_ops.gather(boxlist_and_class_scores,
                                       selected_indices)
      # Make the scores -1 for invalid boxes.
      valid_nms_boxes_indx = tf.less(
          tf.range(max_selection_size), num_valid_nms_boxes)
      nms_scores = nms_result.get_field(fields.BoxListFields.scores)
      nms_result.add_field(fields.BoxListFields.scores,
                           tf.where(valid_nms_boxes_indx, nms_scores,
                                    -1 * tf.ones(max_selection_size)))
      num_valid_nms_boxes_cumulative += num_valid_nms_boxes

      nms_result.add_field(
          fields.BoxListFields.classes, (tf.zeros_like(
              nms_result.get_field(fields.BoxListFields.scores)) + class_idx))
      selected_boxes_list.append(nms_result)
    selected_boxes = box_list_ops.concatenate(selected_boxes_list)
    sorted_boxes = box_list_ops.sort_by_field(selected_boxes,
                                              fields.BoxListFields.scores)
    if clip_window is not None:
      # When pad_to_max_output_size is False, it prunes the boxes with zero
      # area.
      sorted_boxes = box_list_ops.clip_to_window(
          sorted_boxes,
          clip_window,
          filter_nonoverlapping=not pad_to_max_output_size)
      # Set the scores of boxes with zero area to -1 to keep the default
      # behaviour of pruning out zero area boxes.
      sorted_boxes_size = tf.shape(sorted_boxes.get())[0]
      non_zero_box_area = tf.cast(box_list_ops.area(sorted_boxes), tf.bool)
      sorted_boxes_scores = tf.where(
          non_zero_box_area,
          sorted_boxes.get_field(fields.BoxListFields.scores),
          -1 * tf.ones(sorted_boxes_size))
      sorted_boxes.add_field(fields.BoxListFields.scores, sorted_boxes_scores)
      num_valid_nms_boxes_cumulative = tf.reduce_sum(
          tf.cast(tf.greater_equal(sorted_boxes_scores, 0), tf.int32))
      sorted_boxes = box_list_ops.sort_by_field(sorted_boxes,
                                                fields.BoxListFields.scores)
      if change_coordinate_frame:
        sorted_boxes = box_list_ops.change_coordinate_frame(
            sorted_boxes, clip_window)

    if max_total_size:
      max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes())
      sorted_boxes = box_list_ops.gather(sorted_boxes,
                                         tf.range(max_total_size))
      num_valid_nms_boxes_cumulative = tf.where(
          max_total_size > num_valid_nms_boxes_cumulative,
          num_valid_nms_boxes_cumulative, max_total_size)
    # Select only the valid boxes if pad_to_max_output_size is False.
    if not pad_to_max_output_size:
      sorted_boxes = box_list_ops.gather(
          sorted_boxes, tf.range(num_valid_nms_boxes_cumulative))

    return sorted_boxes, num_valid_nms_boxes_cumulative


def batch_multiclass_non_max_suppression(boxes,
                                         scores,
                                         score_thresh,
                                         iou_thresh,
                                         max_size_per_class,
                                         max_total_size=0,
                                         clip_window=None,
                                         change_coordinate_frame=False,
                                         num_valid_boxes=None,
                                         masks=None,
                                         additional_fields=None,
                                         scope=None,
                                         use_static_shapes=False,
                                         parallel_iterations=32):
  """Multi-class version of non maximum suppression that operates on a batch.

  This op is similar to `multiclass_non_max_suppression` but operates on a
  batch of boxes and scores. See documentation for
  `multiclass_non_max_suppression` for details.

  Args:
    boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing
      detections. If `q` is 1 then same boxes are used for all classes
      otherwise, if `q` is equal to number of classes, class-specific boxes are
      used.
    scores: A [batch_size, num_anchors, num_classes] float32 tensor containing
      the scores for each of the `num_anchors` detections. The scores have to
      be non-negative when use_static_shapes is set True.
    score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap
      with previously selected boxes are removed).
    max_size_per_class: maximum number of retained boxes per class.
    max_total_size: maximum number of boxes retained over all classes. By
      default returns all boxes retained after capping boxes per class.
    clip_window: A float32 tensor of shape [batch_size, 4] where each entry is
      of the form [y_min, x_min, y_max, x_max] representing the window to clip
      boxes to before performing non-max suppression. This argument can also be
      a tensor of shape [4] in which case, the same clip window is applied to
      all images in the batch. If clip_window is None, all boxes are used to
      perform non-max suppression.
    change_coordinate_frame: Whether to normalize coordinates after clipping
      relative to clip_window (this can only be set to True if a clip_window is
      provided)
    num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape
      [batch_size] representing the number of valid boxes to be considered for
      each image in the batch.  This parameter allows for ignoring zero
      paddings.
    masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width]
      float32 tensor containing box masks. `q` can be either number of classes
      or 1 depending on whether a separate mask is predicted per class.
    additional_fields: (optional) If not None, a dictionary that maps keys to
      tensors whose dimensions are [batch_size, num_anchors, ...].
    scope: tf scope name.
    use_static_shapes: If true, the output nmsed boxes are padded to be of
      length `max_size_per_class` and it doesn't clip boxes to max_total_size.
      Defaults to false.
    parallel_iterations: (optional) number of batch items to process in
      parallel.

  Returns:
    'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor
      containing the non-max suppressed boxes.
    'nmsed_scores': A [batch_size, max_detections] float32 tensor containing
      the scores for the boxes.
    'nmsed_classes': A [batch_size, max_detections] float32 tensor
      containing the class for boxes.
    'nmsed_masks': (optional) a
      [batch_size, max_detections, mask_height, mask_width] float32 tensor
      containing masks for each selected box. This is set to None if input
      `masks` is None.
    'nmsed_additional_fields': (optional) a dictionary of
      [batch_size, max_detections, ...] float32 tensors corresponding to the
      tensors specified in the input `additional_fields`. This is not returned
      if input `additional_fields` is None.
    'num_detections': A [batch_size] int32 tensor indicating the number of
      valid detections per batch item. Only the top num_detections[i] entries
      in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of
      the entries are zero paddings.

  Raises:
    ValueError: if `q` in boxes.shape is not 1 or not equal to number of
      classes as inferred from scores.shape.
  """
  q = boxes.shape[2].value
  num_classes = scores.shape[2].value
  if q != 1 and q != num_classes:
    raise ValueError('third dimension of boxes must be either 1 or equal '
                     'to the third dimension of scores')
  if change_coordinate_frame and clip_window is None:
    raise ValueError('if change_coordinate_frame is True, then a clip_window '
                     'must be specified.')
  original_masks = masks
  original_additional_fields = additional_fields
  with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'):
    boxes_shape = boxes.shape
    batch_size = boxes_shape[0].value
    num_anchors = boxes_shape[1].value

    if batch_size is None:
      batch_size = tf.shape(boxes)[0]
    if num_anchors is None:
      num_anchors = tf.shape(boxes)[1]

    # If num valid boxes aren't provided, create one and mark all boxes as
    # valid.
    if num_valid_boxes is None:
      num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors

    # If masks aren't provided, create dummy masks so we can only have one copy
    # of _single_image_nms_fn and discard the dummy masks after map_fn.
    if masks is None:
      masks_shape = tf.stack([batch_size, num_anchors, q, 1, 1])
      masks = tf.zeros(masks_shape)

    if clip_window is None:
      clip_window = tf.stack([
          tf.reduce_min(boxes[:, :, :, 0]),
          tf.reduce_min(boxes[:, :, :, 1]),
          tf.reduce_max(boxes[:, :, :, 2]),
          tf.reduce_max(boxes[:, :, :, 3])
      ])
    if clip_window.shape.ndims == 1:
      clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1])

    if additional_fields is None:
      additional_fields = {}

    def _single_image_nms_fn(args):
      """Runs NMS on a single image and returns padded output.

      Args:
        args: A list of tensors consisting of the following:
          per_image_boxes - A [num_anchors, q, 4] float32 tensor containing
            detections. If `q` is 1 then same boxes are used for all classes
            otherwise, if `q` is equal to number of classes, class-specific
            boxes are used.
          per_image_scores - A [num_anchors, num_classes] float32 tensor
            containing the scores for each of the `num_anchors` detections.
          per_image_masks - A [num_anchors, q, mask_height, mask_width] float32
            tensor containing box masks. `q` can be either number of classes
            or 1 depending on whether a separate mask is predicted per class.
          per_image_clip_window - A 1D float32 tensor of the form
            [ymin, xmin, ymax, xmax] representing the window to clip the boxes
            to.
          per_image_additional_fields - (optional) A variable number of float32
            tensors each with size [num_anchors, ...].
          per_image_num_valid_boxes - A tensor of type `int32`. A 1-D tensor of
            shape [batch_size] representing the number of valid boxes to be
            considered for each image in the batch.  This parameter allows for
            ignoring zero paddings.

      Returns:
        'nmsed_boxes': A [max_detections, 4] float32 tensor containing the
          non-max suppressed boxes.
        'nmsed_scores': A [max_detections] float32 tensor containing the scores
          for the boxes.
        'nmsed_classes': A [max_detections] float32 tensor containing the class
          for boxes.
        'nmsed_masks': (optional) a [max_detections, mask_height, mask_width]
          float32 tensor containing masks for each selected box. This is set to
          None if input `masks` is None.
        'nmsed_additional_fields':  (optional) A variable number of float32
          tensors each with size [max_detections, ...] corresponding to the
          input `per_image_additional_fields`.
        'num_detections': A [batch_size] int32 tensor indicating the number of
          valid detections per batch item. Only the top num_detections[i]
          entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid.
          The rest of the entries are zero paddings.
      """
      per_image_boxes = args[0]
      per_image_scores = args[1]
      per_image_masks = args[2]
      per_image_clip_window = args[3]
      per_image_additional_fields = {
          key: value
          for key, value in zip(additional_fields, args[4:-1])
      }
      per_image_num_valid_boxes = args[-1]
      if use_static_shapes:
        total_proposals = tf.shape(per_image_scores)
        per_image_scores = tf.where(
            tf.less(tf.range(total_proposals[0]), per_image_num_valid_boxes),
            per_image_scores,
            tf.fill(total_proposals, np.finfo('float32').min))
      else:
        per_image_boxes = tf.reshape(
            tf.slice(per_image_boxes, 3 * [0],
                     tf.stack([per_image_num_valid_boxes, -1, -1])),
            [-1, q, 4])
        per_image_scores = tf.reshape(
            tf.slice(per_image_scores, [0, 0],
                     tf.stack([per_image_num_valid_boxes, -1])),
            [-1, num_classes])
        per_image_masks = tf.reshape(
            tf.slice(per_image_masks, 4 * [0],
                     tf.stack([per_image_num_valid_boxes, -1, -1, -1])),
            [-1, q, per_image_masks.shape[2].value,
             per_image_masks.shape[3].value])
        if per_image_additional_fields is not None:
          for key, tensor in per_image_additional_fields.items():
            additional_field_shape = tensor.get_shape()
            additional_field_dim = len(additional_field_shape)
            per_image_additional_fields[key] = tf.reshape(
                tf.slice(per_image_additional_fields[key],
                         additional_field_dim * [0],
                         tf.stack([per_image_num_valid_boxes] +
                                  (additional_field_dim - 1) * [-1])),
                [-1] + [dim.value for dim in additional_field_shape[1:]])

      nmsed_boxlist, num_valid_nms_boxes = multiclass_non_max_suppression(
          per_image_boxes,
          per_image_scores,
          score_thresh,
          iou_thresh,
          max_size_per_class,
          max_total_size,
          clip_window=per_image_clip_window,
          change_coordinate_frame=change_coordinate_frame,
          masks=per_image_masks,
          pad_to_max_output_size=use_static_shapes,
          additional_fields=per_image_additional_fields)

      if not use_static_shapes:
        nmsed_boxlist = box_list_ops.pad_or_clip_box_list(
            nmsed_boxlist, max_total_size)
      num_detections = num_valid_nms_boxes
      nmsed_boxes = nmsed_boxlist.get()
      nmsed_scores = nmsed_boxlist.get_field(fields.BoxListFields.scores)
      nmsed_classes = nmsed_boxlist.get_field(fields.BoxListFields.classes)
      nmsed_masks = nmsed_boxlist.get_field(fields.BoxListFields.masks)
      nmsed_additional_fields = [
          nmsed_boxlist.get_field(key) for key in per_image_additional_fields
      ]
      return ([nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] +
              nmsed_additional_fields + [num_detections])

    num_additional_fields = 0
    if additional_fields is not None:
      num_additional_fields = len(additional_fields)
    num_nmsed_outputs = 4 + num_additional_fields

    batch_outputs = shape_utils.static_or_dynamic_map_fn(
        _single_image_nms_fn,
        elems=([boxes, scores, masks, clip_window] +
               list(additional_fields.values()) + [num_valid_boxes]),
        dtype=(num_nmsed_outputs * [tf.float32] + [tf.int32]),
        parallel_iterations=parallel_iterations)

    batch_nmsed_boxes = batch_outputs[0]
    batch_nmsed_scores = batch_outputs[1]
    batch_nmsed_classes = batch_outputs[2]
    batch_nmsed_masks = batch_outputs[3]
    batch_nmsed_additional_fields = {
        key: value
        for key, value in zip(additional_fields, batch_outputs[4:-1])
    }
    batch_num_detections = batch_outputs[-1]

    if original_masks is None:
      batch_nmsed_masks = None

    if original_additional_fields is None:
      batch_nmsed_additional_fields = None

    return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,
            batch_nmsed_masks, batch_nmsed_additional_fields,
            batch_num_detections)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/post_processing.py
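For reference, a minimal illustrative call of multiclass_non_max_suppression with class-agnostic boxes (q = 1), following the shapes documented above. The box/score values are made up, and the import path is assumed from this repository's layout.

# Illustrative sketch: three candidate boxes shared across two classes.
# boxes is [k=3, q=1, 4]; scores is [k=3, num_classes=2].
import tensorflow as tf

from object_detection.core import post_processing

boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0]],
                     [[0.0, 0.05, 1.0, 1.0]],
                     [[0.5, 0.5, 0.9, 0.9]]], tf.float32)
scores = tf.constant([[0.9, 0.1],
                      [0.8, 0.2],
                      [0.1, 0.7]], tf.float32)

# Returns a BoxList (with 'scores' and 'classes' fields) plus the number of
# valid boxes; the two overlapping class-0 boxes should collapse to one.
nmsed, num_valid = post_processing.multiclass_non_max_suppression(
    boxes, scores, score_thresh=0.5, iou_thresh=0.6, max_size_per_class=10)
with tf.Session() as sess:
  out_boxes, out_scores, n = sess.run(
      [nmsed.get(), nmsed.get_field('scores'), num_valid])
  print(n, out_boxes, out_scores)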
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Classification and regression loss functions for object detection.

Localization losses:
 * WeightedL2LocalizationLoss
 * WeightedSmoothL1LocalizationLoss
 * WeightedIOULocalizationLoss

Classification losses:
 * WeightedSigmoidClassificationLoss
 * WeightedSoftmaxClassificationLoss
 * WeightedSoftmaxClassificationAgainstLogitsLoss
 * BootstrappedSigmoidClassificationLoss
"""
from abc import ABCMeta
from abc import abstractmethod

import tensorflow as tf

from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.utils import ops

slim = tf.contrib.slim


class Loss(object):
  """Abstract base class for loss functions."""
  __metaclass__ = ABCMeta

  def __call__(self,
               prediction_tensor,
               target_tensor,
               ignore_nan_targets=False,
               losses_mask=None,
               scope=None,
               **params):
    """Call the loss function.

    Args:
      prediction_tensor: an N-d tensor of shape [batch, anchors, ...]
        representing predicted quantities.
      target_tensor: an N-d tensor of shape [batch, anchors, ...] representing
        regression or classification targets.
      ignore_nan_targets: whether to ignore nan targets in the loss
        computation. E.g. can be used if the target tensor is missing
        groundtruth data that shouldn't be factored into the loss.
      losses_mask: A [batch] boolean tensor that indicates whether losses
        should be applied to individual images in the batch. For elements that
        are True, corresponding prediction, target, and weight tensors will be
        removed prior to loss computation. If None, no filtering will take
        place prior to loss computation.
      scope: Op scope name. Defaults to 'Loss' if None.
      **params: Additional keyword arguments for specific implementations of
        the Loss.

    Returns:
      loss: a tensor representing the value of the loss function.
    """
    with tf.name_scope(scope, 'Loss',
                       [prediction_tensor, target_tensor, params]) as scope:
      if ignore_nan_targets:
        target_tensor = tf.where(tf.is_nan(target_tensor),
                                 prediction_tensor,
                                 target_tensor)
      if losses_mask is not None:
        tensor_multiplier = self._get_loss_multiplier_for_tensor(
            prediction_tensor,
            losses_mask)
        prediction_tensor *= tensor_multiplier
        target_tensor *= tensor_multiplier

        if 'weights' in params:
          params['weights'] = tf.convert_to_tensor(params['weights'])
          weights_multiplier = self._get_loss_multiplier_for_tensor(
              params['weights'],
              losses_mask)
          params['weights'] *= weights_multiplier
      return self._compute_loss(prediction_tensor, target_tensor, **params)

  def _get_loss_multiplier_for_tensor(self, tensor, losses_mask):
    loss_multiplier_shape = tf.stack([-1] + [1] * (len(tensor.shape) - 1))
    return tf.cast(tf.reshape(losses_mask, loss_multiplier_shape), tf.float32)

  @abstractmethod
  def _compute_loss(self, prediction_tensor, target_tensor, **params):
    """Method to be overridden by implementations.

    Args:
      prediction_tensor: a tensor representing predicted quantities
      target_tensor: a tensor representing regression or classification
        targets
      **params: Additional keyword arguments for specific implementations of
        the Loss.

    Returns:
      loss: an N-d tensor of shape [batch, anchors, ...] containing the loss
        per anchor
    """
    pass


class WeightedL2LocalizationLoss(Loss):
  """L2 localization loss function with anchorwise output support.

  Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2
  """

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors] tensor
        representing the value of the loss function.
    """
    weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
        weights, 2)
    square_diff = 0.5 * tf.square(weighted_diff)
    return tf.reduce_sum(square_diff, 2)


class WeightedSmoothL1LocalizationLoss(Loss):
  """Smooth L1 localization loss function aka Huber Loss.

  The smooth L1_loss is defined elementwise as .5 x^2 if |x| <= delta and
  delta * (|x| - 0.5*delta) otherwise, where x is the difference between
  predictions and target.

  See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
  """

  def __init__(self, delta=1.0):
    """Constructor.

    Args:
      delta: delta for smooth L1 loss.
    """
    self._delta = delta

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors] tensor
        representing the value of the loss function.
    """
    return tf.reduce_sum(tf.losses.huber_loss(
        target_tensor,
        prediction_tensor,
        delta=self._delta,
        weights=tf.expand_dims(weights, axis=2),
        loss_collection=None,
        reduction=tf.losses.Reduction.NONE
    ), axis=2)


class WeightedIOULocalizationLoss(Loss):
  """IOU localization loss function.

  Sums the IOU for corresponding pairs of predicted/groundtruth boxes
  and for each pair assigns a loss of 1 - IOU. We then compute a weighted
  sum over all pairs which is returned as the total loss.
  """

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4]
        representing the decoded predicted boxes
      target_tensor: A float tensor of shape [batch_size, num_anchors, 4]
        representing the decoded target boxes
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors] tensor
        representing the value of the loss function.
    """
    predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4]))
    target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4]))
    per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes,
                                                         target_boxes)
    return tf.reshape(weights, [-1]) * per_anchor_iou_loss


class WeightedSigmoidClassificationLoss(Loss):
  """Sigmoid cross entropy classification loss function."""

  def _compute_loss(self,
                    prediction_tensor,
                    target_tensor,
                    weights,
                    class_indices=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape, either [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.
      class_indices: (Optional) A 1-D integer tensor of class indices.
        If provided, computes loss only for the specified class indices.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors, num_classes]
        representing the value of the loss function.
    """
    if class_indices is not None:
      weights *= tf.reshape(
          ops.indices_to_dense_vector(class_indices,
                                      tf.shape(prediction_tensor)[2]),
          [1, 1, -1])
    per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
        labels=target_tensor, logits=prediction_tensor))
    return per_entry_cross_ent * weights


class SigmoidFocalClassificationLoss(Loss):
  """Sigmoid focal cross entropy loss.

  Focal loss down-weights well classified examples and focuses on the hard
  examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
  """

  def __init__(self, gamma=2.0, alpha=0.25):
    """Constructor.

    Args:
      gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
      alpha: optional alpha weighting factor to balance positives vs negatives.
    """
    self._alpha = alpha
    self._gamma = gamma

  def _compute_loss(self,
                    prediction_tensor,
                    target_tensor,
                    weights,
                    class_indices=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape, either [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.
      class_indices: (Optional) A 1-D integer tensor of class indices.
        If provided, computes loss only for the specified class indices.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors, num_classes]
        representing the value of the loss function.
    """
    if class_indices is not None:
      weights *= tf.reshape(
          ops.indices_to_dense_vector(class_indices,
                                      tf.shape(prediction_tensor)[2]),
          [1, 1, -1])
    per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
        labels=target_tensor, logits=prediction_tensor))
    prediction_probabilities = tf.sigmoid(prediction_tensor)
    p_t = ((target_tensor * prediction_probabilities) +
           ((1 - target_tensor) * (1 - prediction_probabilities)))
    modulating_factor = 1.0
    if self._gamma:
      modulating_factor = tf.pow(1.0 - p_t, self._gamma)
    alpha_weight_factor = 1.0
    if self._alpha is not None:
      alpha_weight_factor = (target_tensor * self._alpha +
                             (1 - target_tensor) * (1 - self._alpha))
    focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
                                per_entry_cross_ent)
    return focal_cross_entropy_loss * weights


class WeightedSoftmaxClassificationLoss(Loss):
  """Softmax loss function."""

  def __init__(self, logit_scale=1.0):
    """Constructor.

    Args:
      logit_scale: When this value is high, the prediction is "diffused" and
                   when this value is low, the prediction is made peakier.
                   (default 1.0)
    """
    self._logit_scale = logit_scale

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape, either [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    weights = tf.reduce_mean(weights, axis=2)
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    prediction_tensor = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(prediction_tensor, [-1, num_classes])))
    return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights


class WeightedSoftmaxClassificationAgainstLogitsLoss(Loss):
  """Softmax loss function against logits.

  Targets are expected to be provided in logits space instead of "one hot" or
  "probability distribution" space.
  """

  def __init__(self, logit_scale=1.0):
    """Constructor.

    Args:
      logit_scale: When this value is high, the target is "diffused" and
                   when this value is low, the target is made peakier.
                   (default 1.0)
    """
    self._logit_scale = logit_scale

  def _scale_and_softmax_logits(self, logits):
    """Scale logits then apply softmax."""
    scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits')
    return tf.nn.softmax(scaled_logits, name='convert_scores')

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing logit classification targets
      weights: a float tensor of shape, either [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
""" weights = tf.reduce_mean(weights, axis=2) num_classes = prediction_tensor.get_shape().as_list()[-1] target_tensor = self._scale_and_softmax_logits(target_tensor) prediction_tensor = tf.divide(prediction_tensor, self._logit_scale, name='scale_logits') per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( labels=tf.reshape(target_tensor, [-1, num_classes]), logits=tf.reshape(prediction_tensor, [-1, num_classes]))) return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights class BootstrappedSigmoidClassificationLoss(Loss): """Bootstrapped sigmoid cross entropy classification loss function. This loss uses a convex combination of training labels and the current model's predictions as training targets in the classification loss. The idea is that as the model improves over time, its predictions can be trusted more and we can use these predictions to mitigate the damage of noisy/incorrect labels, because incorrect labels are likely to be eventually highly inconsistent with other stimuli predicted to have the same label by the model. In "soft" bootstrapping, we use all predicted class probabilities, whereas in "hard" bootstrapping, we use the single class favored by the model. See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by Reed et al. (ICLR 2015). """ def __init__(self, alpha, bootstrap_type='soft'): """Constructor. Args: alpha: a float32 scalar tensor between 0 and 1 representing interpolation weight bootstrap_type: set to either 'hard' or 'soft' (default) Raises: ValueError: if bootstrap_type is not either 'hard' or 'soft' """ if bootstrap_type != 'hard' and bootstrap_type != 'soft': raise ValueError('Unrecognized bootstrap_type: must be one of ' '\'hard\' or \'soft.\'') self._alpha = alpha self._bootstrap_type = bootstrap_type def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if self._bootstrap_type == 'soft': bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.sigmoid(prediction_tensor) else: bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.cast( tf.sigmoid(prediction_tensor) > 0.5, tf.float32) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=bootstrap_target_tensor, logits=prediction_tensor)) return per_entry_cross_ent * weights class HardExampleMiner(object): """Hard example mining for regions in a list of images. Implements hard example mining to select a subset of regions to be back-propagated. For each image, selects the regions with highest losses, subject to the condition that a newly selected region cannot have an IOU > iou_threshold with any of the previously selected regions. This can be achieved by re-using a greedy non-maximum suppression algorithm. A constraint on the number of negatives mined per positive region can also be enforced. 
Reference papers: "Training Region-based Object Detectors with Online Hard Example Mining" (CVPR 2016) by Srivastava et al., and "SSD: Single Shot MultiBox Detector" (ECCV 2016) by Liu et al. """ def __init__(self, num_hard_examples=64, iou_threshold=0.7, loss_type='both', cls_loss_weight=0.05, loc_loss_weight=0.06, max_negatives_per_positive=None, min_negatives_per_image=0): """Constructor. The hard example mining implemented by this class can replicate the behavior in the two aforementioned papers (Srivastava et al., and Liu et al). To replicate the A2 paper (Srivastava et al), num_hard_examples is set to a fixed parameter (64 by default) and iou_threshold is set to .7 for running non-max-suppression the predicted boxes prior to hard mining. In order to replicate the SSD paper (Liu et al), num_hard_examples should be set to None, max_negatives_per_positive should be 3 and iou_threshold should be 1.0 (in order to effectively turn off NMS). Args: num_hard_examples: maximum number of hard examples to be selected per image (prior to enforcing max negative to positive ratio constraint). If set to None, all examples obtained after NMS are considered. iou_threshold: minimum intersection over union for an example to be discarded during NMS. loss_type: use only classification losses ('cls', default), localization losses ('loc') or both losses ('both'). In the last case, cls_loss_weight and loc_loss_weight are used to compute weighted sum of the two losses. cls_loss_weight: weight for classification loss. loc_loss_weight: weight for location loss. max_negatives_per_positive: maximum number of negatives to retain for each positive anchor. By default, num_negatives_per_positive is None, which means that we do not enforce a prespecified negative:positive ratio. Note also that num_negatives_per_positives can be a float (and will be converted to be a float even if it is passed in otherwise). min_negatives_per_image: minimum number of negative anchors to sample for a given image. Setting this to a positive number allows sampling negatives in an image without any positive anchors and thus not biased towards at least one detection per image. """ self._num_hard_examples = num_hard_examples self._iou_threshold = iou_threshold self._loss_type = loss_type self._cls_loss_weight = cls_loss_weight self._loc_loss_weight = loc_loss_weight self._max_negatives_per_positive = max_negatives_per_positive self._min_negatives_per_image = min_negatives_per_image if self._max_negatives_per_positive is not None: self._max_negatives_per_positive = float(self._max_negatives_per_positive) self._num_positives_list = None self._num_negatives_list = None def __call__(self, location_losses, cls_losses, decoded_boxlist_list, match_list=None): """Computes localization and classification losses after hard mining. Args: location_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise localization losses. cls_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise classification losses. decoded_boxlist_list: a list of decoded BoxList representing location predictions for each image. match_list: an optional list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. Match objects in match_list are used to reference which anchors are positive, negative or ignored. 
If self._max_negatives_per_positive exists, these are then used to enforce a prespecified negative to positive ratio. Returns: mined_location_loss: a float scalar with sum of localization losses from selected hard examples. mined_cls_loss: a float scalar with sum of classification losses from selected hard examples. Raises: ValueError: if location_losses, cls_losses and decoded_boxlist_list do not have compatible shapes (i.e., they must correspond to the same number of images). ValueError: if match_list is specified but its length does not match len(decoded_boxlist_list). """ mined_location_losses = [] mined_cls_losses = [] location_losses = tf.unstack(location_losses) cls_losses = tf.unstack(cls_losses) num_images = len(decoded_boxlist_list) if not match_list: match_list = num_images * [None] if not len(location_losses) == len(decoded_boxlist_list) == len(cls_losses): raise ValueError('location_losses, cls_losses and decoded_boxlist_list ' 'do not have compatible shapes.') if not isinstance(match_list, list): raise ValueError('match_list must be a list.') if len(match_list) != len(decoded_boxlist_list): raise ValueError('match_list must either be None or have ' 'length=len(decoded_boxlist_list).') num_positives_list = [] num_negatives_list = [] for ind, detection_boxlist in enumerate(decoded_boxlist_list): box_locations = detection_boxlist.get() match = match_list[ind] image_losses = cls_losses[ind] if self._loss_type == 'loc': image_losses = location_losses[ind] elif self._loss_type == 'both': image_losses *= self._cls_loss_weight image_losses += location_losses[ind] * self._loc_loss_weight if self._num_hard_examples is not None: num_hard_examples = self._num_hard_examples else: num_hard_examples = detection_boxlist.num_boxes() with tf.device('/CPU:0'): selected_indices = tf.image.non_max_suppression( box_locations, image_losses, num_hard_examples, self._iou_threshold) if self._max_negatives_per_positive is not None and match: (selected_indices, num_positives, num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio( selected_indices, match, self._max_negatives_per_positive, self._min_negatives_per_image) num_positives_list.append(num_positives) num_negatives_list.append(num_negatives) mined_location_losses.append( tf.reduce_sum(tf.gather(location_losses[ind], selected_indices))) mined_cls_losses.append( tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices))) location_loss = tf.reduce_sum(tf.stack(mined_location_losses)) cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses)) if match and self._max_negatives_per_positive: self._num_positives_list = num_positives_list self._num_negatives_list = num_negatives_list return (location_loss, cls_loss) def summarize(self): """Summarize the number of positives and negatives after mining.""" if self._num_positives_list and self._num_negatives_list: avg_num_positives = tf.reduce_mean(tf.to_float(self._num_positives_list)) avg_num_negatives = tf.reduce_mean(tf.to_float(self._num_negatives_list)) tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives) tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives) def _subsample_selection_to_desired_neg_pos_ratio(self, indices, match, max_negatives_per_positive, min_negatives_per_image=0): """Subsample a collection of selected indices to a desired neg:pos ratio. 
This function takes a subset of M indices (indexing into a large anchor collection of N anchors where M<N) which are labeled as positive/negative via a Match object (matched indices are positive, unmatched indices are negative). It returns a subset of the provided indices retaining all positives as well as up to the first K negatives, where: K=floor(num_negative_per_positive * num_positives). For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors), with positives=[2, 5] and negatives=[4, 7, 9, 10] and num_negatives_per_positive=1, then the returned subset of indices is [2, 4, 5, 7]. Args: indices: An integer tensor of shape [M] representing a collection of selected anchor indices match: A matcher.Match object encoding the match between anchors and groundtruth boxes for a given image, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. max_negatives_per_positive: (float) maximum number of negatives for each positive anchor. min_negatives_per_image: minimum number of negative anchors for a given image. Allow sampling negatives in image without any positive anchors. Returns: selected_indices: An integer tensor of shape [M'] representing a collection of selected anchor indices with M' <= M. num_positives: An integer tensor representing the number of positive examples in selected set of indices. num_negatives: An integer tensor representing the number of negative examples in selected set of indices. """ positives_indicator = tf.gather(match.matched_column_indicator(), indices) negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices) num_positives = tf.reduce_sum(tf.to_int32(positives_indicator)) max_negatives = tf.maximum(min_negatives_per_image, tf.to_int32(max_negatives_per_positive * tf.to_float(num_positives))) topk_negatives_indicator = tf.less_equal( tf.cumsum(tf.to_int32(negatives_indicator)), max_negatives) subsampled_selection_indices = tf.where( tf.logical_or(positives_indicator, topk_negatives_indicator)) num_negatives = tf.size(subsampled_selection_indices) - num_positives return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]), num_positives, num_negatives)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/losses.py
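# The following is a small NumPy sketch (not part of the repository file
# above) of the focal-loss arithmetic that SigmoidFocalClassificationLoss
# implements; the logits and the gamma/alpha settings are illustrative
# assumptions chosen only to show how easy examples are down-weighted.
import numpy as np


def _sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))


def focal_loss_value(logit, target, gamma=2.0, alpha=0.25):
  """Scalar focal loss for a single logit/target pair."""
  p = _sigmoid(logit)
  # Plain sigmoid cross entropy.
  cross_ent = -(target * np.log(p) + (1 - target) * np.log(1 - p))
  # p_t is the probability assigned to the true class.
  p_t = target * p + (1 - target) * (1 - p)
  # Easy examples (p_t near 1) are down-weighted by (1 - p_t)**gamma.
  modulating_factor = (1.0 - p_t) ** gamma
  alpha_weight = target * alpha + (1 - target) * (1 - alpha)
  return alpha_weight * modulating_factor * cross_ent

# An easy positive (p ~ 0.97) contributes orders of magnitude less than a
# hard one (p = 0.5), which is what the focal-loss tests further below assert.
print(focal_loss_value(3.5, 1.0))  # ~6e-06
print(focal_loss_value(0.0, 1.0))  # ~0.043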
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Region Similarity Calculators for BoxLists.

Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod

import tensorflow as tf

from object_detection.core import box_list_ops
from object_detection.core import standard_fields as fields


class RegionSimilarityCalculator(object):
  """Abstract base class for region similarity calculator."""
  __metaclass__ = ABCMeta

  def compare(self, boxlist1, boxlist2, scope=None):
    """Computes matrix of pairwise similarity between BoxLists.

    This op (to be overridden) computes a measure of pairwise similarity
    between the boxes in the given BoxLists. Higher values indicate more
    similarity.

    Note that this method simply measures similarity and does not explicitly
    perform a matching.

    Args:
      boxlist1: BoxList holding N boxes.
      boxlist2: BoxList holding M boxes.
      scope: Op scope name. Defaults to 'Compare' if None.

    Returns:
      a (float32) tensor of shape [N, M] with pairwise similarity scores.
    """
    with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
      return self._compare(boxlist1, boxlist2)

  @abstractmethod
  def _compare(self, boxlist1, boxlist2):
    pass


class IouSimilarity(RegionSimilarityCalculator):
  """Class to compute similarity based on Intersection over Union (IOU) metric.

  This class computes pairwise similarity between two BoxLists based on IOU.
  """

  def _compare(self, boxlist1, boxlist2):
    """Compute pairwise IOU similarity between the two BoxLists.

    Args:
      boxlist1: BoxList holding N boxes.
      boxlist2: BoxList holding M boxes.

    Returns:
      A tensor with shape [N, M] representing pairwise iou scores.
    """
    return box_list_ops.iou(boxlist1, boxlist2)


class NegSqDistSimilarity(RegionSimilarityCalculator):
  """Class to compute similarity based on the squared distance metric.

  This class computes pairwise similarity between two BoxLists based on the
  negative squared distance metric.
  """

  def _compare(self, boxlist1, boxlist2):
    """Compute matrix of (negated) sq distances.

    Args:
      boxlist1: BoxList holding N boxes.
      boxlist2: BoxList holding M boxes.

    Returns:
      A tensor with shape [N, M] representing negated pairwise squared
      distances.
    """
    return -1 * box_list_ops.sq_dist(boxlist1, boxlist2)


class IoaSimilarity(RegionSimilarityCalculator):
  """Class to compute similarity based on Intersection over Area (IOA) metric.

  This class computes pairwise similarity between two BoxLists based on their
  pairwise intersections divided by the areas of the boxes in the second
  BoxList.
  """

  def _compare(self, boxlist1, boxlist2):
    """Compute pairwise IOA similarity between the two BoxLists.

    Args:
      boxlist1: BoxList holding N boxes.
      boxlist2: BoxList holding M boxes.

    Returns:
      A tensor with shape [N, M] representing pairwise IOA scores.
    """
    return box_list_ops.ioa(boxlist1, boxlist2)


class ThresholdedIouSimilarity(RegionSimilarityCalculator):
  """Class to compute similarity based on thresholded IOU and score.

  This class computes pairwise similarity between two BoxLists based on IOU
  and a 'score' present in boxlist1. If IOU > threshold, then the entry in the
  output pairwise tensor will contain `score`, otherwise 0.
  """

  def __init__(self, iou_threshold=0):
    """Initialize the ThresholdedIouSimilarity.

    Args:
      iou_threshold: For a given pair of boxes, if the IOU is > iou_threshold,
        then the comparison result will be the foreground probability of
        the first box, otherwise it will be zero.
    """
    self._iou_threshold = iou_threshold

  def _compare(self, boxlist1, boxlist2):
    """Compute pairwise IOU similarity between the two BoxLists and score.

    Args:
      boxlist1: BoxList holding N boxes. Must have a score field.
      boxlist2: BoxList holding M boxes.

    Returns:
      A tensor with shape [N, M] representing scores thresholded by pairwise
      iou scores.
    """
    ious = box_list_ops.iou(boxlist1, boxlist2)
    scores = boxlist1.get_field(fields.BoxListFields.scores)
    scores = tf.expand_dims(scores, axis=1)
    row_replicated_scores = tf.tile(scores, [1, tf.shape(ious)[-1]])
    thresholded_ious = tf.where(ious > self._iou_threshold,
                                row_replicated_scores, tf.zeros_like(ious))
    return thresholded_ious
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/region_similarity_calculator.py
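# A worked example (a sketch, independent of the module above) of the pairwise
# IOU matrix that IouSimilarity delegates to box_list_ops.iou. Boxes follow
# the [ymin, xmin, ymax, xmax] convention used throughout this codebase; the
# coordinates are made-up values for illustration.
import numpy as np


def pairwise_iou(boxes1, boxes2):
  """IOU between every box in boxes1 ([N, 4]) and boxes2 ([M, 4])."""
  ymin1, xmin1, ymax1, xmax1 = np.split(boxes1, 4, axis=1)
  ymin2, xmin2, ymax2, xmax2 = np.split(boxes2, 4, axis=1)
  # Pairwise intersection heights and widths, clipped at zero.
  heights = np.maximum(
      0.0, np.minimum(ymax1, ymax2.T) - np.maximum(ymin1, ymin2.T))
  widths = np.maximum(
      0.0, np.minimum(xmax1, xmax2.T) - np.maximum(xmin1, xmin2.T))
  intersections = heights * widths
  areas1 = (ymax1 - ymin1) * (xmax1 - xmin1)
  areas2 = (ymax2 - ymin2) * (xmax2 - xmin2)
  unions = areas1 + areas2.T - intersections
  return intersections / unions

boxes_a = np.array([[0.0, 0.0, 1.0, 1.0]])
boxes_b = np.array([[0.0, 0.0, 1.0, 1.0], [0.5, 0.5, 1.5, 1.5]])
print(pairwise_iou(boxes_a, boxes_b))  # [[1.0, 0.142857...]] (0.25 / 1.75)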
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.core.box_coder."""
import tensorflow as tf

from object_detection.core import box_coder
from object_detection.core import box_list


class MockBoxCoder(box_coder.BoxCoder):
  """Test BoxCoder that encodes/decodes using the multiply-by-two function."""

  def code_size(self):
    return 4

  def _encode(self, boxes, anchors):
    return 2.0 * boxes.get()

  def _decode(self, rel_codes, anchors):
    return box_list.BoxList(rel_codes / 2.0)


class BoxCoderTest(tf.test.TestCase):

  def test_batch_decode(self):
    mock_anchor_corners = tf.constant(
        [[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
    mock_anchors = box_list.BoxList(mock_anchor_corners)
    mock_box_coder = MockBoxCoder()

    expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]],
                      [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]

    encoded_boxes_list = [mock_box_coder.encode(
        box_list.BoxList(tf.constant(boxes)), mock_anchors)
                          for boxes in expected_boxes]
    encoded_boxes = tf.stack(encoded_boxes_list)
    decoded_boxes = box_coder.batch_decode(
        encoded_boxes, mock_box_coder, mock_anchors)

    with self.test_session() as sess:
      decoded_boxes_result = sess.run(decoded_boxes)
      self.assertAllClose(expected_boxes, decoded_boxes_result)


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/box_coder_test.py
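# A quick arithmetic check (a sketch with made-up corner values) of what the
# MockBoxCoder round trip in the test above relies on: encoding multiplies the
# box corners by two, so decoding must halve the codes to recover the input.
import numpy as np

boxes = np.array([[0.0, 0.1, 0.5, 0.6],
                  [0.5, 0.6, 0.7, 0.8]])
encoded = 2.0 * boxes    # mirrors MockBoxCoder._encode
decoded = encoded / 2.0  # mirrors MockBoxCoder._decode
assert np.allclose(decoded, boxes)
print(decoded)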
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.losses.""" import math import numpy as np import tensorflow as tf from object_detection.core import box_list from object_detection.core import losses from object_detection.core import matcher class WeightedL2LocalizationLossTest(tf.test.TestCase): def testReturnsCorrectWeightedLoss(self): batch_size = 3 num_anchors = 10 code_size = 4 prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32) loss_op = losses.WeightedL2LocalizationLoss() loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor, weights=weights)) expected_loss = (3 * 5 * 4) / 2.0 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, expected_loss) def testReturnsCorrectAnchorwiseLoss(self): batch_size = 3 num_anchors = 16 code_size = 4 prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.ones([batch_size, num_anchors]) loss_op = losses.WeightedL2LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) expected_loss = np.ones((batch_size, num_anchors)) * 2 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, expected_loss) def testReturnsCorrectNanLoss(self): batch_size = 3 num_anchors = 10 code_size = 4 prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.concat([ tf.zeros([batch_size, num_anchors, code_size // 2]), tf.ones([batch_size, num_anchors, code_size // 2]) * np.nan ], axis=2) weights = tf.ones([batch_size, num_anchors]) loss_op = losses.WeightedL2LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, ignore_nan_targets=True) loss = tf.reduce_sum(loss) expected_loss = (3 * 5 * 4) / 2.0 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, expected_loss) def testReturnsCorrectWeightedLossWithLossesMask(self): batch_size = 4 num_anchors = 10 code_size = 4 prediction_tensor = tf.ones([batch_size, num_anchors, code_size]) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], tf.float32) losses_mask = tf.constant([True, False, True, True], tf.bool) loss_op = losses.WeightedL2LocalizationLoss() loss = tf.reduce_sum(loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask)) expected_loss = (3 * 5 * 4) / 2.0 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, expected_loss)
class WeightedSmoothL1LocalizationLossTest(tf.test.TestCase): def testReturnsCorrectLoss(self): batch_size = 2 num_anchors = 3 code_size = 4 prediction_tensor = tf.constant([[[2.5, 0, .4, 0], [0, 0, 0, 0], [0, 2.5, 0, .4]], [[3.5, 0, 0, 0], [0, .4, 0, .9], [0, 0, 1.5, 0]]], tf.float32) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[2, 1, 1], [0, 3, 0]], tf.float32) loss_op = losses.WeightedSmoothL1LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) exp_loss = 7.695 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithLossesMask(self): batch_size = 3 num_anchors = 3 code_size = 4 prediction_tensor = tf.constant([[[2.5, 0, .4, 0], [0, 0, 0, 0], [0, 2.5, 0, .4]], [[3.5, 0, 0, 0], [0, .4, 0, .9], [0, 0, 1.5, 0]], [[3.5, 7., 0, 0], [0, .4, 0, .9], [2.2, 2.2, 1.5, 0]]], tf.float32) target_tensor = tf.zeros([batch_size, num_anchors, code_size]) weights = tf.constant([[2, 1, 1], [0, 3, 0], [4, 3, 0]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) loss_op = losses.WeightedSmoothL1LocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss) exp_loss = 7.695 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) class WeightedIOULocalizationLossTest(tf.test.TestCase): def testReturnsCorrectLoss(self): prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [0, 0, .5, .25]]]) target_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [50, 50, 500.5, 100.25]]]) weights = [[1.0, .5, 2.0]] loss_op = losses.WeightedIOULocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) exp_loss = 2.0 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithNoLabels(self): prediction_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [0, 0, .5, .25]]]) target_tensor = tf.constant([[[1.5, 0, 2.4, 1], [0, 0, 1, 1], [50, 50, 500.5, 100.25]]]) weights = [[1.0, .5, 2.0]] losses_mask = tf.constant([False], tf.bool) loss_op = losses.WeightedIOULocalizationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss) exp_loss = 0.0 with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) class WeightedSigmoidClassificationLossTest(tf.test.TestCase): def testReturnsCorrectLoss(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSigmoidClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) exp_loss = -2 * math.log(.5) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 
100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSigmoidClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss, axis=2) exp_loss = np.matrix([[0, 0, -math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithClassIndices(self): prediction_tensor = tf.constant([[[-100, 100, -100, 100], [100, -100, -100, -100], [100, 0, -100, 100], [-100, -100, 100, -100]], [[-100, 0, 100, 100], [-100, 100, -100, 100], [100, 100, 100, 100], [0, 0, -1, 100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0, 0], [1, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]], [[0, 0, 1, 0], [0, 1, 0, 0], [1, 1, 1, 0], [1, 0, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [0, 0, 0, 0]]], tf.float32) # Ignores the last class. class_indices = tf.constant([0, 1, 2], tf.int32) loss_op = losses.WeightedSigmoidClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, class_indices=class_indices) loss = tf.reduce_sum(loss, axis=2) exp_loss = np.matrix([[0, 0, -math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithLossesMask(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) loss_op = losses.WeightedSigmoidClassificationLoss() loss_per_anchor = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss_per_anchor) exp_loss = -2 * math.log(.5) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllEqual(prediction_tensor.shape.as_list(), loss_per_anchor.shape.as_list()) self.assertAllEqual(target_tensor.shape.as_list(), loss_per_anchor.shape.as_list()) self.assertAllClose(loss_output, exp_loss) def _logit(probability): return math.log(probability / (1. 
- probability)) class SigmoidFocalClassificationLossTest(tf.test.TestCase): def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self): prediction_tensor = tf.constant([[[_logit(0.97)], [_logit(0.91)], [_logit(0.73)], [_logit(0.27)], [_logit(0.09)], [_logit(0.03)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) with self.test_session() as sess: sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss]) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss / focal_loss))) self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]]) def testHardExamplesProduceLossComparableToSigmoidXEntropy(self): prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) with self.test_session() as sess: sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss]) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss / focal_loss))) self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]]) def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self): prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights)) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights)) with self.test_session() as sess: sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss]) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss / focal_loss))) self.assertAlmostEqual(order_of_ratio, 1.) 
def testIgnoreNegativeExampleLossViaAlphaMultiplier(self): prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=1.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) with self.test_session() as sess: sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss]) self.assertAllClose(focal_loss[0][3:], [0., 0.]) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss[0][:3] / focal_loss[0][:3]))) self.assertAllClose(order_of_ratio, [1., 1., 1.]) def testIgnorePositiveExampleLossViaAlphaMultiplier(self): prediction_tensor = tf.constant([[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)], [_logit(0.45)]]], tf.float32) target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32) weights = tf.constant([[[1], [1], [1], [1], [1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights), axis=2) with self.test_session() as sess: sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss]) self.assertAllClose(focal_loss[0][:3], [0., 0., 0.]) order_of_ratio = np.power(10, np.floor(np.log10(sigmoid_loss[0][3:] / focal_loss[0][3:]))) self.assertAllClose(order_of_ratio, [1., 1.]) def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.5, gamma=0.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = focal_loss_op(prediction_tensor, target_tensor, weights=weights) sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights) with self.test_session() as sess: sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss]) self.assertAllClose(sigmoid_loss, focal_loss * 2) def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=None, gamma=0.0) sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss() focal_loss = 
focal_loss_op(prediction_tensor, target_tensor, weights=weights) sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor, weights=weights) with self.test_session() as sess: sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss]) self.assertAllClose(sigmoid_loss, focal_loss) def testExpectedLossWithAlphaOneAndZeroGamma(self): # All zeros correspond to 0.5 probability. prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=1.0, gamma=0.0) focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights)) with self.test_session() as sess: focal_loss = sess.run(focal_loss) self.assertAllClose( (-math.log(.5) * # x-entropy per class per anchor 1.0 * # alpha 8), # positives from 8 anchors focal_loss) def testExpectedLossWithAlpha75AndZeroGamma(self): # All zeros correspond to 0.5 probability. prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75, gamma=0.0) focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights)) with self.test_session() as sess: focal_loss = sess.run(focal_loss) self.assertAllClose( (-math.log(.5) * # x-entropy per class per anchor. ((0.75 * # alpha for positives. 8) + # positives from 8 anchors. (0.25 * # alpha for negatives. 8 * 2))), # negatives from 8 anchors for two classes. focal_loss) def testExpectedLossWithLossesMask(self): # All zeros correspond to 0.5 probability. prediction_tensor = tf.constant([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]], [[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75, gamma=0.0) focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask)) with self.test_session() as sess: focal_loss = sess.run(focal_loss) self.assertAllClose( (-math.log(.5) * # x-entropy per class per anchor. ((0.75 * # alpha for positives. 8) + # positives from 8 anchors. (0.25 * # alpha for negatives. 8 * 2))), # negatives from 8 anchors for two classes. 
focal_loss) class WeightedSoftmaxClassificationLossTest(tf.test.TestCase): def testReturnsCorrectLoss(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [0.5, 0.5, 0.5], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSoftmaxClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) exp_loss = - 1.5 * math.log(.5) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [0.5, 0.5, 0.5], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) loss_op = losses.WeightedSoftmaxClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLossWithHighLogitScaleSetting(self): """At very high logit_scale, all predictions will be ~0.33.""" # TODO(yonib): Also test logit_scale with anchorwise=False. 
logit_scale = 10e16 prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) loss_op = losses.WeightedSoftmaxClassificationLoss(logit_scale=logit_scale) loss = loss_op(prediction_tensor, target_tensor, weights=weights) uniform_distribution_loss = - math.log(.33333333333) exp_loss = np.matrix([[uniform_distribution_loss] * 4, [uniform_distribution_loss] * 4]) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossWithLossesMask(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [0, 1, 0], [1, 0, 0]], [[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [0.5, 0.5, 0.5], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]], tf.float32) losses_mask = tf.constant([True, True, False], tf.bool) loss_op = losses.WeightedSoftmaxClassificationLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights, losses_mask=losses_mask) loss = tf.reduce_sum(loss) exp_loss = - 1.5 * math.log(.5) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) class WeightedSoftmaxClassificationAgainstLogitsLossTest(tf.test.TestCase): def testReturnsCorrectLoss(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 1]], tf.float32) weights_shape = tf.shape(weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), tf.constant([3])], axis=0) weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple) loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) exp_loss = - 1.5 * math.log(.5) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 0]], tf.float32) weights_shape = tf.shape(weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), 
tf.constant([3])], axis=0) weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple) loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss() loss = loss_op(prediction_tensor, target_tensor, weights=weights) exp_loss = np.matrix([[0, 0, - 0.5 * math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLossWithLogitScaleSetting(self): logit_scale = 100. prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [0, 0, -100], [-100, -100, 100]], [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100], [100, -100, -100]]], tf.float32) weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 0]], tf.float32) weights_shape = tf.shape(weights) weights_multiple = tf.concat( [tf.ones_like(weights_shape), tf.constant([3])], axis=0) weights = tf.tile(tf.expand_dims(weights, 2), weights_multiple) loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss( logit_scale=logit_scale) loss = loss_op(prediction_tensor, target_tensor, weights=weights) # find softmax of the two prediction types above softmax_pred1 = [np.exp(-1), np.exp(-1), np.exp(1)] softmax_pred1 /= sum(softmax_pred1) softmax_pred2 = [np.exp(0), np.exp(0), np.exp(-1)] softmax_pred2 /= sum(softmax_pred2) # compute the expected cross entropy for perfect matches exp_entropy1 = sum( [-x*np.log(x) for x in softmax_pred1]) exp_entropy2 = sum( [-x*np.log(x) for x in softmax_pred2]) # weighted expected losses exp_loss = np.matrix( [[exp_entropy1, exp_entropy1, exp_entropy2*.5, exp_entropy1], [exp_entropy2, exp_entropy1, exp_entropy1, 0.]]) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) class BootstrappedSigmoidClassificationLossTest(tf.test.TestCase): def testReturnsCorrectLossSoftBootstrapping(self): prediction_tensor = tf.constant([[[-100, 100, 0], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) alpha = tf.constant(.5, tf.float32) loss_op = losses.BootstrappedSigmoidClassificationLoss( alpha, bootstrap_type='soft') loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss) exp_loss = -math.log(.5) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectLossHardBootstrapping(self): prediction_tensor = tf.constant([[[-100, 100, 0], [100, -100, -100], [100, -100, -100], [-100, -100, 100]], [[-100, -100, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) alpha = tf.constant(.5, tf.float32) loss_op = losses.BootstrappedSigmoidClassificationLoss( alpha, bootstrap_type='hard') loss = loss_op(prediction_tensor, target_tensor, weights=weights) 
loss = tf.reduce_sum(loss) exp_loss = -math.log(.5) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) def testReturnsCorrectAnchorWiseLoss(self): prediction_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100], [100, 0, -100], [-100, -100, 100]], [[-100, 0, 100], [-100, 100, -100], [100, 100, 100], [0, 0, -1]]], tf.float32) target_tensor = tf.constant([[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]], [[0, 0, 1], [0, 1, 0], [1, 1, 1], [1, 0, 0]]], tf.float32) weights = tf.constant([[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1], [0, 0, 0]]], tf.float32) alpha = tf.constant(.5, tf.float32) loss_op = losses.BootstrappedSigmoidClassificationLoss( alpha, bootstrap_type='hard') loss = loss_op(prediction_tensor, target_tensor, weights=weights) loss = tf.reduce_sum(loss, axis=2) exp_loss = np.matrix([[0, 0, -math.log(.5), 0], [-math.log(.5), 0, 0, 0]]) with self.test_session() as sess: loss_output = sess.run(loss) self.assertAllClose(loss_output, exp_loss) class HardExampleMinerTest(tf.test.TestCase): def testHardMiningWithSingleLossType(self): location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32) cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32) box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32) decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) decoded_boxlist_list.append(box_list.BoxList(box_corners)) # Uses only location loss to select hard examples loss_op = losses.HardExampleMiner(num_hard_examples=1, iou_threshold=0.0, loss_type='loc', cls_loss_weight=1, loc_loss_weight=1) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list) exp_loc_loss = 100 + 3 exp_cls_loss = 0 + 0 with self.test_session() as sess: loc_loss_output = sess.run(loc_loss) self.assertAllClose(loc_loss_output, exp_loc_loss) cls_loss_output = sess.run(cls_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) def testHardMiningWithBothLossType(self): location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32) cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32) box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32) decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) decoded_boxlist_list.append(box_list.BoxList(box_corners)) loss_op = losses.HardExampleMiner(num_hard_examples=1, iou_threshold=0.0, loss_type='both', cls_loss_weight=1, loc_loss_weight=1) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list) exp_loc_loss = 80 + 0 exp_cls_loss = 50 + 9 with self.test_session() as sess: loc_loss_output = sess.run(loc_loss) self.assertAllClose(loc_loss_output, exp_loc_loss) cls_loss_output = sess.run(cls_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) def testHardMiningNMS(self): location_losses = tf.constant([[100, 90, 80, 0], [0, 1, 2, 3]], tf.float32) cls_losses = tf.constant([[0, 10, 50, 110], [9, 6, 3, 0]], tf.float32) box_corners = tf.constant([[0.1, 0.1, 0.9, 0.9], [0.9, 0.9, 0.99, 0.99], [0.1, 0.1, 0.9, 0.9], [0.1, 0.1, 0.9, 0.9]], tf.float32) decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) decoded_boxlist_list.append(box_list.BoxList(box_corners)) loss_op = losses.HardExampleMiner(num_hard_examples=2, iou_threshold=0.5, loss_type='cls', 
cls_loss_weight=1, loc_loss_weight=1) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list) exp_loc_loss = 0 + 90 + 0 + 1 exp_cls_loss = 110 + 10 + 9 + 6 with self.test_session() as sess: loc_loss_output = sess.run(loc_loss) self.assertAllClose(loc_loss_output, exp_loc_loss) cls_loss_output = sess.run(cls_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) def testEnforceNegativesPerPositiveRatio(self): location_losses = tf.constant([[100, 90, 80, 0, 1, 2, 3, 10, 20, 100, 20, 3]], tf.float32) cls_losses = tf.constant([[0, 0, 100, 0, 90, 70, 0, 60, 0, 17, 13, 0]], tf.float32) box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.5, 0.1], [0.0, 0.0, 0.6, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.8, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 1.1, 0.1], [0.0, 0.0, 0.2, 0.1]], tf.float32) match_results = tf.constant([2, -1, 0, -1, -1, 1, -1, -1, -1, -1, -1, 3]) match_list = [matcher.Match(match_results)] decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) max_negatives_per_positive_list = [0.0, 0.5, 1.0, 1.5, 10] exp_loc_loss_list = [80 + 2, 80 + 1 + 2, 80 + 1 + 2 + 10, 80 + 1 + 2 + 10 + 100, 80 + 1 + 2 + 10 + 100 + 20] exp_cls_loss_list = [100 + 70, 100 + 90 + 70, 100 + 90 + 70 + 60, 100 + 90 + 70 + 60 + 17, 100 + 90 + 70 + 60 + 17 + 13] for max_negatives_per_positive, exp_loc_loss, exp_cls_loss in zip( max_negatives_per_positive_list, exp_loc_loss_list, exp_cls_loss_list): loss_op = losses.HardExampleMiner( num_hard_examples=None, iou_threshold=0.9999, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1, max_negatives_per_positive=max_negatives_per_positive) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list, match_list) loss_op.summarize() with self.test_session() as sess: loc_loss_output = sess.run(loc_loss) self.assertAllClose(loc_loss_output, exp_loc_loss) cls_loss_output = sess.run(cls_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) def testEnforceNegativesPerPositiveRatioWithMinNegativesPerImage(self): location_losses = tf.constant([[100, 90, 80, 0, 1, 2, 3, 10, 20, 100, 20, 3]], tf.float32) cls_losses = tf.constant([[0, 0, 100, 0, 90, 70, 0, 60, 0, 17, 13, 0]], tf.float32) box_corners = tf.constant([[0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.5, 0.1], [0.0, 0.0, 0.6, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 0.8, 0.1], [0.0, 0.0, 0.2, 0.1], [0.0, 0.0, 1.0, 0.1], [0.0, 0.0, 1.1, 0.1], [0.0, 0.0, 0.2, 0.1]], tf.float32) match_results = tf.constant([-1] * 12) match_list = [matcher.Match(match_results)] decoded_boxlist_list = [] decoded_boxlist_list.append(box_list.BoxList(box_corners)) min_negatives_per_image_list = [0, 1, 2, 4, 5, 6] exp_loc_loss_list = [0, 80, 80 + 1, 80 + 1 + 2 + 10, 80 + 1 + 2 + 10 + 100, 80 + 1 + 2 + 10 + 100 + 20] exp_cls_loss_list = [0, 100, 100 + 90, 100 + 90 + 70 + 60, 100 + 90 + 70 + 60 + 17, 100 + 90 + 70 + 60 + 17 + 13] for min_negatives_per_image, exp_loc_loss, exp_cls_loss in zip( min_negatives_per_image_list, exp_loc_loss_list, exp_cls_loss_list): loss_op = losses.HardExampleMiner( num_hard_examples=None, iou_threshold=0.9999, loss_type='cls', cls_loss_weight=1, loc_loss_weight=1, max_negatives_per_positive=3, min_negatives_per_image=min_negatives_per_image) (loc_loss, cls_loss) = loss_op(location_losses, cls_losses, decoded_boxlist_list, match_list) with self.test_session() as sess: loc_loss_output = 
sess.run(loc_loss) self.assertAllClose(loc_loss_output, exp_loc_loss) cls_loss_output = sess.run(cls_loss) self.assertAllClose(cls_loss_output, exp_cls_loss) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/losses_test.py
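The recurring exp_loss constants in the softmax tests above are just cross entropies of very simple softmax distributions. A standalone numpy check (an editorial sketch, not repository code; the helper name softmax_xent is made up for illustration):

import numpy as np

def softmax_xent(logits, one_hot_target):
  """Cross entropy between softmax(logits) and a one-hot target."""
  shifted = logits - np.max(logits)  # subtract max for numerical stability
  probs = np.exp(shifted) / np.sum(np.exp(shifted))
  return -np.sum(one_hot_target * np.log(probs))

# Logits [0, 0, -100] put ~0.5 on each of the first two classes, so a
# one-hot target on either of them costs -log(0.5), matching the tests.
print(softmax_xent(np.array([0., 0., -100.]), np.array([1., 0., 0.])))
# ~0.6931 == -math.log(.5)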
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keypoint operations. Keypoints are represented as tensors of shape [num_instances, num_keypoints, 2], where the last dimension holds the [y, x] coordinates of each keypoint. """ import numpy as np import tensorflow as tf def scale(keypoints, y_scale, x_scale, scope=None): """Scales keypoint coordinates in x and y dimensions. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] y_scale: (float) scalar tensor x_scale: (float) scalar tensor scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'Scale'): y_scale = tf.cast(y_scale, tf.float32) x_scale = tf.cast(x_scale, tf.float32) new_keypoints = keypoints * [[[y_scale, x_scale]]] return new_keypoints def clip_to_window(keypoints, window, scope=None): """Clips keypoints to a window. This op clips any input keypoints to a window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window to which the op should clip the keypoints. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'ClipToWindow'): y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) y = tf.maximum(tf.minimum(y, win_y_max), win_y_min) x = tf.maximum(tf.minimum(x, win_x_max), win_x_min) new_keypoints = tf.concat([y, x], 2) return new_keypoints def prune_outside_window(keypoints, window, scope=None): """Prunes keypoints that fall outside a given window. This function replaces keypoints that fall outside the given window with nan. See also clip_to_window which clips any keypoints that fall outside the given window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window outside of which the op should prune the keypoints. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'PruneOutsideWindow'): y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2) win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window) valid_indices = tf.logical_and( tf.logical_and(y >= win_y_min, y <= win_y_max), tf.logical_and(x >= win_x_min, x <= win_x_max)) new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y)) new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x)) new_keypoints = tf.concat([new_y, new_x], 2) return new_keypoints def change_coordinate_frame(keypoints, window, scope=None): """Changes coordinate frame of the keypoints to be relative to window's frame. 
Given a window of the form [y_min, x_min, y_max, x_max], changes keypoint coordinates from keypoints of shape [num_instances, num_keypoints, 2] to be relative to this window. An example use case is data augmentation: where we are given groundtruth keypoints and would like to randomly crop the image to some window. In this case we need to change the coordinate frame of each groundtruth keypoint to be relative to this new window. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max] window we should change the coordinate frame to. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'ChangeCoordinateFrame'): win_height = window[2] - window[0] win_width = window[3] - window[1] new_keypoints = scale(keypoints - [window[0], window[1]], 1.0 / win_height, 1.0 / win_width) return new_keypoints def to_normalized_coordinates(keypoints, height, width, check_range=True, scope=None): """Converts absolute keypoint coordinates to normalized coordinates in [0, 1]. Usually one uses the dynamic shape of the image or conv-layer tensor: keypoints = keypoint_ops.to_normalized_coordinates(keypoints, tf.shape(images)[1], tf.shape(images)[2]), This function raises an assertion failed error at graph execution time when the maximum coordinate is smaller than 1.01 (which means that coordinates are already normalized). The value 1.01 is to deal with small rounding errors. Args: keypoints: A tensor of shape [num_instances, num_keypoints, 2]. height: Maximum value for y coordinate of absolute keypoint coordinates. width: Maximum value for x coordinate of absolute keypoint coordinates. check_range: If True, checks if the coordinates are normalized. scope: name scope. Returns: tensor of shape [num_instances, num_keypoints, 2] with normalized coordinates in [0, 1]. """ with tf.name_scope(scope, 'ToNormalizedCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) if check_range: max_val = tf.reduce_max(keypoints) max_assert = tf.Assert(tf.greater(max_val, 1.01), ['max value is lower than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(keypoints, 1.0 / height, 1.0 / width) def to_absolute_coordinates(keypoints, height, width, check_range=True, scope=None): """Converts normalized keypoint coordinates to absolute pixel coordinates. This function raises an assertion failed error when the maximum keypoint coordinate value is larger than 1.01 (in which case coordinates are already absolute). Args: keypoints: A tensor of shape [num_instances, num_keypoints, 2] height: Maximum value for y coordinate of absolute keypoint coordinates. width: Maximum value for x coordinate of absolute keypoint coordinates. check_range: If True, checks if the coordinates are normalized or not. scope: name scope. Returns: tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates in terms of the image size. """ with tf.name_scope(scope, 'ToAbsoluteCoordinates'): height = tf.cast(height, tf.float32) width = tf.cast(width, tf.float32) # Ensure range of input keypoints is correct. 
if check_range: max_val = tf.reduce_max(keypoints) max_assert = tf.Assert(tf.greater_equal(1.01, max_val), ['maximum keypoint coordinate value is larger ' 'than 1.01: ', max_val]) with tf.control_dependencies([max_assert]): width = tf.identity(width) return scale(keypoints, height, width) def flip_horizontal(keypoints, flip_point, flip_permutation, scope=None): """Flips the keypoints horizontally around the flip_point. This operation flips the x coordinate for each keypoint around the flip_point and also permutes the keypoints in a manner specified by flip_permutation. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] flip_point: (float) scalar tensor representing the x coordinate to flip the keypoints around. flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. This specifies the mapping from original keypoint indices to the flipped keypoint indices. This is used primarily for keypoints that are not reflection invariant. E.g. Suppose there are 3 keypoints representing ['head', 'right_eye', 'left_eye'], then a logical choice for flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' and 'right_eye' after a horizontal flip. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'FlipHorizontal'): keypoints = tf.transpose(keypoints, [1, 0, 2]) keypoints = tf.gather(keypoints, flip_permutation) v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) u = flip_point * 2.0 - u new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints def flip_vertical(keypoints, flip_point, flip_permutation, scope=None): """Flips the keypoints vertically around the flip_point. This operation flips the y coordinate for each keypoint around the flip_point and also permutes the keypoints in a manner specified by flip_permutation. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] flip_point: (float) scalar tensor representing the y coordinate to flip the keypoints around. flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. This specifies the mapping from original keypoint indices to the flipped keypoint indices. This is used primarily for keypoints that are not reflection invariant. E.g. Suppose there are 3 keypoints representing ['head', 'right_eye', 'left_eye'], then a logical choice for flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye' and 'right_eye' after a horizontal flip. scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'FlipVertical'): keypoints = tf.transpose(keypoints, [1, 0, 2]) keypoints = tf.gather(keypoints, flip_permutation) v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2) v = flip_point * 2.0 - v new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints def rot90(keypoints, scope=None): """Rotates the keypoints counter-clockwise by 90 degrees. Args: keypoints: a tensor of shape [num_instances, num_keypoints, 2] scope: name scope. Returns: new_keypoints: a tensor of shape [num_instances, num_keypoints, 2] """ with tf.name_scope(scope, 'Rot90'): keypoints = tf.transpose(keypoints, [1, 0, 2]) v, u = tf.split(value=keypoints[:, :, ::-1], num_or_size_splits=2, axis=2) v = 1.0 - v new_keypoints = tf.concat([v, u], 2) new_keypoints = tf.transpose(new_keypoints, [1, 0, 2]) return new_keypoints
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/keypoint_ops.py
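A hypothetical usage sketch for the keypoint_ops module above; it assumes a TF 1.x runtime (matching the tf.contrib-era code in this repository) and that the object_detection package is importable:

import tensorflow as tf
from object_detection.core import keypoint_ops

keypoints = tf.constant([[[0.2, 0.4], [0.6, 1.2]]])  # [1, 2, 2], (y, x) pairs
window = tf.constant([0.0, 0.0, 1.0, 1.0])           # [y_min, x_min, y_max, x_max]

clipped = keypoint_ops.clip_to_window(keypoints, window)
halved = keypoint_ops.scale(keypoints, y_scale=0.5, x_scale=0.5)

with tf.Session() as sess:
  print(sess.run(clipped))  # the out-of-window x=1.2 is clipped to 1.0
  print(sess.run(halved))   # every coordinate multiplied by 0.5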
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for region_similarity_calculator.""" import tensorflow as tf from object_detection.core import box_list from object_detection.core import region_similarity_calculator from object_detection.core import standard_fields as fields class RegionSimilarityCalculatorTest(tf.test.TestCase): def test_get_correct_pairwise_similarity_based_on_iou(self): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) exp_output = [[2.0 / 16.0, 0, 6.0 / 400.0], [1.0 / 16.0, 0.0, 5.0 / 400.0]] boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) iou_similarity_calculator = region_similarity_calculator.IouSimilarity() iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) with self.test_session() as sess: iou_output = sess.run(iou_similarity) self.assertAllClose(iou_output, exp_output) def test_get_correct_pairwise_similarity_based_on_squared_distances(self): corners1 = tf.constant([[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 2.0]]) corners2 = tf.constant([[3.0, 4.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 3.0], [0.0, 0.0, 0.0, 0.0]]) exp_output = [[-26, -25, 0], [-18, -27, -6]] boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) dist_similarity_calc = region_similarity_calculator.NegSqDistSimilarity() dist_similarity = dist_similarity_calc.compare(boxes1, boxes2) with self.test_session() as sess: dist_output = sess.run(dist_similarity) self.assertAllClose(dist_output, exp_output) def test_get_correct_pairwise_similarity_based_on_ioa(self): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) exp_output_1 = [[2.0 / 12.0, 0, 6.0 / 400.0], [1.0 / 12.0, 0.0, 5.0 / 400.0]] exp_output_2 = [[2.0 / 6.0, 1.0 / 5.0], [0, 0], [6.0 / 6.0, 5.0 / 5.0]] boxes1 = box_list.BoxList(corners1) boxes2 = box_list.BoxList(corners2) ioa_similarity_calculator = region_similarity_calculator.IoaSimilarity() ioa_similarity_1 = ioa_similarity_calculator.compare(boxes1, boxes2) ioa_similarity_2 = ioa_similarity_calculator.compare(boxes2, boxes1) with self.test_session() as sess: iou_output_1, iou_output_2 = sess.run( [ioa_similarity_1, ioa_similarity_2]) self.assertAllClose(iou_output_1, exp_output_1) self.assertAllClose(iou_output_2, exp_output_2) def test_get_correct_pairwise_similarity_based_on_thresholded_iou(self): corners1 = tf.constant([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]]) corners2 = tf.constant([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]]) scores = tf.constant([.3, .6]) iou_threshold = .013 exp_output = tf.constant([[0.3, 0., 0.3], [0.6, 0., 0.]]) boxes1 = box_list.BoxList(corners1) boxes1.add_field(fields.BoxListFields.scores, scores) boxes2 = 
box_list.BoxList(corners2) iou_similarity_calculator = ( region_similarity_calculator.ThresholdedIouSimilarity( iou_threshold=iou_threshold)) iou_similarity = iou_similarity_calculator.compare(boxes1, boxes2) with self.test_session() as sess: iou_output = sess.run(iou_similarity) self.assertAllClose(iou_output, exp_output) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/region_similarity_calculator_test.py
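The first expected IoU value in the test above can be checked by hand; an editorial sketch in plain Python (boxes are [y_min, x_min, y_max, x_max]):

def iou(a, b):
  """Intersection-over-union of two corner-encoded boxes."""
  inter_h = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
  inter_w = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
  inter = inter_h * inter_w
  area_a = (a[2] - a[0]) * (a[3] - a[1])
  area_b = (b[2] - b[0]) * (b[3] - b[1])
  return inter / (area_a + area_b - inter)

print(iou([4., 3., 7., 5.], [3., 4., 6., 8.]))  # 2/16 = 0.125, as in exp_output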
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Provides functions to batch a dictionary of input tensors.""" import collections import tensorflow as tf from object_detection.core import prefetcher rt_shape_str = '_runtime_shapes' class BatchQueue(object): """BatchQueue class. This class creates a batch queue to asynchronously enqueue tensors_dict. It also adds a FIFO prefetcher so that the batches are readily available for the consumers. Dequeue ops for a BatchQueue object can be created via the Dequeue method which evaluates to a batch of tensor_dict. Example input pipeline with batching: ------------------------------------ key, string_tensor = slim.parallel_reader.parallel_read(...) tensor_dict = decoder.decode(string_tensor) tensor_dict = preprocessor.preprocess(tensor_dict, ...) batch_queue = batcher.BatchQueue(tensor_dict, batch_size=32, batch_queue_capacity=2000, num_batch_queue_threads=8, prefetch_queue_capacity=20) tensor_dict = batch_queue.dequeue() outputs = Model(tensor_dict) ... ----------------------------------- Notes: ----- This class batches tensors of unequal sizes by zero padding and unpadding them after generating a batch. This can be computationally expensive when batching tensors (such as images) that are of vastly different sizes. So it is recommended that the shapes of such tensors be fully defined in tensor_dict while other lightweight tensors such as bounding box corners and class labels can be of varying sizes. Use either crop or resize operations to fully define the shape of an image in tensor_dict. It is also recommended to perform any preprocessing operations on tensors before passing to BatchQueue and subsequently calling the Dequeue method. Another caveat is that this class does not read the last batch if it is not full. The current implementation makes it hard to support that use case. So, for evaluation, when it is critical to run all the examples through your network use the input pipeline example mentioned in core/prefetcher.py. """ def __init__(self, tensor_dict, batch_size, batch_queue_capacity, num_batch_queue_threads, prefetch_queue_capacity): """Constructs a batch queue holding tensor_dict. Args: tensor_dict: dictionary of tensors to batch. batch_size: batch size. batch_queue_capacity: max capacity of the queue from which the tensors are batched. num_batch_queue_threads: number of threads to use for batching. prefetch_queue_capacity: max capacity of the queue used to prefetch assembled batches. """ # Remember static shapes to set shapes of batched tensors. static_shapes = collections.OrderedDict( {key: tensor.get_shape() for key, tensor in tensor_dict.items()}) # Remember runtime shapes to unpad tensors after batching. 
runtime_shapes = collections.OrderedDict( {(key + rt_shape_str): tf.shape(tensor) for key, tensor in tensor_dict.items()}) all_tensors = tensor_dict all_tensors.update(runtime_shapes) batched_tensors = tf.train.batch( all_tensors, capacity=batch_queue_capacity, batch_size=batch_size, dynamic_pad=True, num_threads=num_batch_queue_threads) self._queue = prefetcher.prefetch(batched_tensors, prefetch_queue_capacity) self._static_shapes = static_shapes self._batch_size = batch_size def dequeue(self): """Dequeues a batch of tensor_dict from the BatchQueue. TODO: use allow_smaller_final_batch to allow running over the whole eval set Returns: A list of tensor_dicts of the requested batch_size. """ batched_tensors = self._queue.dequeue() # Separate input tensors from tensors containing their runtime shapes. tensors = {} shapes = {} for key, batched_tensor in batched_tensors.items(): unbatched_tensor_list = tf.unstack(batched_tensor) for i, unbatched_tensor in enumerate(unbatched_tensor_list): if rt_shape_str in key: shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor else: tensors[(key, i)] = unbatched_tensor # Undo that padding using shapes and create a list of size `batch_size` that # contains tensor dictionaries. tensor_dict_list = [] batch_size = self._batch_size for batch_id in range(batch_size): tensor_dict = {} for key in self._static_shapes: tensor_dict[key] = tf.slice(tensors[(key, batch_id)], tf.zeros_like(shapes[(key, batch_id)]), shapes[(key, batch_id)]) tensor_dict[key].set_shape(self._static_shapes[key]) tensor_dict_list.append(tensor_dict) return tensor_dict_list
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/batcher.py
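An editorial numpy sketch of the pad-then-unpad mechanism BatchQueue relies on: tf.train.batch with dynamic_pad=True zero-pads each tensor to the largest shape in the batch, and the recorded runtime shapes let dequeue() slice that padding back off:

import numpy as np

def unpad(padded, runtime_shape):
  """Slices a zero-padded array back to its original shape."""
  return padded[tuple(slice(0, s) for s in runtime_shape)]

padded = np.zeros((4, 4), np.int32)
padded[:2, :3] = np.arange(6).reshape(2, 3)  # a [2, 3] tensor after padding
print(unpad(padded, (2, 3)))                 # recovers the original [2, 3] array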
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.matcher.""" import numpy as np import tensorflow as tf from object_detection.core import matcher class MatchTest(tf.test.TestCase): def test_get_correct_matched_columnIndices(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_column_indices = [0, 1, 3, 5] matched_column_indices = match.matched_column_indices() self.assertEquals(matched_column_indices.dtype, tf.int32) with self.test_session() as sess: matched_column_indices = sess.run(matched_column_indices) self.assertAllEqual(matched_column_indices, expected_column_indices) def test_get_correct_counts(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) exp_num_matched_columns = 4 exp_num_unmatched_columns = 2 exp_num_ignored_columns = 1 num_matched_columns = match.num_matched_columns() num_unmatched_columns = match.num_unmatched_columns() num_ignored_columns = match.num_ignored_columns() self.assertEquals(num_matched_columns.dtype, tf.int32) self.assertEquals(num_unmatched_columns.dtype, tf.int32) self.assertEquals(num_ignored_columns.dtype, tf.int32) with self.test_session() as sess: (num_matched_columns_out, num_unmatched_columns_out, num_ignored_columns_out) = sess.run( [num_matched_columns, num_unmatched_columns, num_ignored_columns]) self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns) self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns) self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns) def testGetCorrectUnmatchedColumnIndices(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_column_indices = [2, 4] unmatched_column_indices = match.unmatched_column_indices() self.assertEquals(unmatched_column_indices.dtype, tf.int32) with self.test_session() as sess: unmatched_column_indices = sess.run(unmatched_column_indices) self.assertAllEqual(unmatched_column_indices, expected_column_indices) def testGetCorrectMatchedRowIndices(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_row_indices = [3, 1, 0, 5] matched_row_indices = match.matched_row_indices() self.assertEquals(matched_row_indices.dtype, tf.int32) with self.test_session() as sess: matched_row_inds = sess.run(matched_row_indices) self.assertAllEqual(matched_row_inds, expected_row_indices) def test_get_correct_ignored_column_indices(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_column_indices = [6] ignored_column_indices = match.ignored_column_indices() self.assertEquals(ignored_column_indices.dtype, tf.int32) with self.test_session() as sess: ignored_column_indices = sess.run(ignored_column_indices) self.assertAllEqual(ignored_column_indices, 
expected_column_indices) def test_get_correct_matched_column_indicator(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_column_indicator = [True, True, False, True, False, True, False] matched_column_indicator = match.matched_column_indicator() self.assertEquals(matched_column_indicator.dtype, tf.bool) with self.test_session() as sess: matched_column_indicator = sess.run(matched_column_indicator) self.assertAllEqual(matched_column_indicator, expected_column_indicator) def test_get_correct_unmatched_column_indicator(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_column_indicator = [False, False, True, False, True, False, False] unmatched_column_indicator = match.unmatched_column_indicator() self.assertEquals(unmatched_column_indicator.dtype, tf.bool) with self.test_session() as sess: unmatched_column_indicator = sess.run(unmatched_column_indicator) self.assertAllEqual(unmatched_column_indicator, expected_column_indicator) def test_get_correct_ignored_column_indicator(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_column_indicator = [False, False, False, False, False, False, True] ignored_column_indicator = match.ignored_column_indicator() self.assertEquals(ignored_column_indicator.dtype, tf.bool) with self.test_session() as sess: ignored_column_indicator = sess.run(ignored_column_indicator) self.assertAllEqual(ignored_column_indicator, expected_column_indicator) def test_get_correct_unmatched_ignored_column_indices(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) match = matcher.Match(match_results) expected_column_indices = [2, 4, 6] unmatched_ignored_column_indices = (match. unmatched_or_ignored_column_indices()) self.assertEquals(unmatched_ignored_column_indices.dtype, tf.int32) with self.test_session() as sess: unmatched_ignored_column_indices = sess.run( unmatched_ignored_column_indices) self.assertAllEqual(unmatched_ignored_column_indices, expected_column_indices) def test_all_columns_accounted_for(self): # Note: deliberately setting to small number so not always # all possibilities appear (matched, unmatched, ignored) num_matches = 10 match_results = tf.random_uniform( [num_matches], minval=-2, maxval=5, dtype=tf.int32) match = matcher.Match(match_results) matched_column_indices = match.matched_column_indices() unmatched_column_indices = match.unmatched_column_indices() ignored_column_indices = match.ignored_column_indices() with self.test_session() as sess: matched, unmatched, ignored = sess.run([ matched_column_indices, unmatched_column_indices, ignored_column_indices ]) all_indices = np.hstack((matched, unmatched, ignored)) all_indices_sorted = np.sort(all_indices) self.assertAllEqual(all_indices_sorted, np.arange(num_matches, dtype=np.int32)) def test_scalar_gather_based_on_match(self): match_results = tf.constant([3, 1, -1, 0, -1, 5, -2]) input_tensor = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32) expected_gathered_tensor = [3, 1, 100, 0, 100, 5, 200] match = matcher.Match(match_results) gathered_tensor = match.gather_based_on_match(input_tensor, unmatched_value=100., ignored_value=200.) 
self.assertEquals(gathered_tensor.dtype, tf.float32) with self.test_session(): gathered_tensor_out = gathered_tensor.eval() self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) def test_multidimensional_gather_based_on_match(self): match_results = tf.constant([1, -1, -2]) input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], dtype=tf.float32) expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] match = matcher.Match(match_results) gathered_tensor = match.gather_based_on_match(input_tensor, unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4)) self.assertEquals(gathered_tensor.dtype, tf.float32) with self.test_session(): gathered_tensor_out = gathered_tensor.eval() self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) def test_multidimensional_gather_based_on_match_with_matmul_gather_op(self): match_results = tf.constant([1, -1, -2]) input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]], dtype=tf.float32) expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]] match = matcher.Match(match_results, use_matmul_gather=True) gathered_tensor = match.gather_based_on_match(input_tensor, unmatched_value=tf.zeros(4), ignored_value=tf.zeros(4)) self.assertEquals(gathered_tensor.dtype, tf.float32) with self.test_session() as sess: self.assertTrue( all([op.name != 'Gather' for op in sess.graph.get_operations()])) gathered_tensor_out = gathered_tensor.eval() self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/core/matcher_test.py
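An editorial numpy restatement of the Match conventions the tests above exercise: match_results[i] >= 0 means column i matched that row, -1 means unmatched, and -2 means ignored:

import numpy as np

match_results = np.array([3, 1, -1, 0, -1, 5, -2])
print(np.where(match_results >= 0)[0])    # matched columns:   [0 1 3 5]
print(np.where(match_results == -1)[0])   # unmatched columns: [2 4]
print(np.where(match_results == -2)[0])   # ignored columns:   [6]
print(match_results[match_results >= 0])  # matched row indices: [3 1 0 5]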
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions to build DetectionModel training optimizers.""" import tensorflow as tf import horovod.tensorflow as hvd from object_detection.utils import learning_schedules def build(optimizer_config): """Create optimizer based on config. Args: optimizer_config: An Optimizer proto message. Returns: An optimizer and a list of variables for summary. Raises: ValueError: when using an unsupported input data type. """ optimizer_type = optimizer_config.WhichOneof('optimizer') optimizer = None summary_vars = [] if optimizer_type == 'rms_prop_optimizer': config = optimizer_config.rms_prop_optimizer learning_rate = _create_learning_rate(config.learning_rate) summary_vars.append(learning_rate) optimizer = tf.train.RMSPropOptimizer( learning_rate, decay=config.decay, momentum=config.momentum_optimizer_value, epsilon=config.epsilon) if optimizer_type == 'momentum_optimizer': config = optimizer_config.momentum_optimizer learning_rate = _create_learning_rate(config.learning_rate) summary_vars.append(learning_rate) optimizer = tf.train.MomentumOptimizer( learning_rate, momentum=config.momentum_optimizer_value) if optimizer_type == 'adam_optimizer': config = optimizer_config.adam_optimizer learning_rate = _create_learning_rate(config.learning_rate) summary_vars.append(learning_rate) optimizer = tf.train.AdamOptimizer(learning_rate) if optimizer is None: raise ValueError('Optimizer %s not supported.' % optimizer_type) optimizer = hvd.DistributedOptimizer(optimizer) if optimizer_config.use_moving_average: optimizer = tf.contrib.opt.MovingAverageOptimizer( optimizer, average_decay=optimizer_config.moving_average_decay) return optimizer, summary_vars def _create_learning_rate(learning_rate_config): """Create optimizer learning rate based on config. Args: learning_rate_config: A LearningRate proto message. Returns: A learning rate. Raises: ValueError: when using an unsupported input data type. 
""" learning_rate = None learning_rate_type = learning_rate_config.WhichOneof('learning_rate') if learning_rate_type == 'constant_learning_rate': config = learning_rate_config.constant_learning_rate learning_rate = tf.constant(config.learning_rate, dtype=tf.float32, name='learning_rate') if learning_rate_type == 'exponential_decay_learning_rate': config = learning_rate_config.exponential_decay_learning_rate learning_rate = learning_schedules.exponential_decay_with_burnin( tf.train.get_or_create_global_step(), config.initial_learning_rate, config.decay_steps, config.decay_factor, burnin_learning_rate=config.burnin_learning_rate, burnin_steps=config.burnin_steps, min_learning_rate=config.min_learning_rate, staircase=config.staircase) if learning_rate_type == 'manual_step_learning_rate': config = learning_rate_config.manual_step_learning_rate if not config.schedule: raise ValueError('Empty learning rate schedule.') learning_rate_step_boundaries = [x.step for x in config.schedule] learning_rate_sequence = [config.initial_learning_rate] learning_rate_sequence += [x.learning_rate for x in config.schedule] learning_rate = learning_schedules.manual_stepping( tf.train.get_or_create_global_step(), learning_rate_step_boundaries, learning_rate_sequence, config.warmup) if learning_rate_type == 'cosine_decay_learning_rate': config = learning_rate_config.cosine_decay_learning_rate learning_rate = learning_schedules.cosine_decay_with_warmup( tf.train.get_or_create_global_step(), config.learning_rate_base, config.total_steps, config.warmup_learning_rate, config.warmup_steps, config.hold_base_rate_steps) if learning_rate is None: raise ValueError('Learning_rate %s not supported.' % learning_rate_type) return learning_rate
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/optimizer_builder.py
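A hypothetical usage sketch for the build() function above. Note that this NVIDIA variant always wraps the result in hvd.DistributedOptimizer, so the sketch assumes horovod is installed and initialized:

import horovod.tensorflow as hvd
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2

hvd.init()
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge("""
  momentum_optimizer {
    learning_rate { constant_learning_rate { learning_rate: 0.01 } }
    momentum_optimizer_value: 0.9
  }
  use_moving_average: false
""", optimizer_proto)
optimizer, summary_vars = optimizer_builder.build(optimizer_proto)
# summary_vars holds the learning-rate tensor for summaries.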
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for graph_rewriter_builder.""" import mock import tensorflow as tf from object_detection.builders import graph_rewriter_builder from object_detection.protos import graph_rewriter_pb2 class QuantizationBuilderTest(tf.test.TestCase): def testQuantizationBuilderSetsUpCorrectTrainArguments(self): with mock.patch.object( tf.contrib.quantize, 'create_training_graph') as mock_quant_fn: with mock.patch.object(tf.contrib.layers, 'summarize_collection') as mock_summarize_col: graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() graph_rewriter_proto.quantization.delay = 10 graph_rewriter_proto.quantization.weight_bits = 8 graph_rewriter_proto.quantization.activation_bits = 8 graph_rewrite_fn = graph_rewriter_builder.build( graph_rewriter_proto, is_training=True) graph_rewrite_fn() _, kwargs = mock_quant_fn.call_args self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) self.assertEqual(kwargs['quant_delay'], 10) mock_summarize_col.assert_called_with('quant_vars') def testQuantizationBuilderSetsUpCorrectEvalArguments(self): with mock.patch.object(tf.contrib.quantize, 'create_eval_graph') as mock_quant_fn: with mock.patch.object(tf.contrib.layers, 'summarize_collection') as mock_summarize_col: graph_rewriter_proto = graph_rewriter_pb2.GraphRewriter() graph_rewriter_proto.quantization.delay = 10 graph_rewrite_fn = graph_rewriter_builder.build( graph_rewriter_proto, is_training=False) graph_rewrite_fn() _, kwargs = mock_quant_fn.call_args self.assertEqual(kwargs['input_graph'], tf.get_default_graph()) mock_summarize_col.assert_called_with('quant_vars') if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/graph_rewriter_builder_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Input reader builder. Creates data sources for DetectionModels from an InputReader config. See input_reader.proto for options. Note: If users wish to also use their own InputReaders with the Object Detection configuration framework, they should define their own builder function that wraps the build function. """ import tensorflow as tf from object_detection.data_decoders import tf_example_decoder from object_detection.protos import input_reader_pb2 parallel_reader = tf.contrib.slim.parallel_reader def build(input_reader_config): """Builds a tensor dictionary based on the InputReader config. Args: input_reader_config: An input_reader_pb2.InputReader object. Returns: A tensor dict based on the input_reader_config. Raises: ValueError: On invalid input reader proto. ValueError: If no input paths are specified. """ if not isinstance(input_reader_config, input_reader_pb2.InputReader): raise ValueError('input_reader_config not of type ' 'input_reader_pb2.InputReader.') if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': config = input_reader_config.tf_record_input_reader if not config.input_path: raise ValueError('At least one input path must be specified in ' '`input_reader_config`.') _, string_tensor = parallel_reader.parallel_read( config.input_path[:], # Convert `RepeatedScalarContainer` to list. reader_class=tf.TFRecordReader, num_epochs=(input_reader_config.num_epochs if input_reader_config.num_epochs else None), num_readers=input_reader_config.num_readers, shuffle=input_reader_config.shuffle, dtypes=[tf.string, tf.string], capacity=input_reader_config.queue_capacity, min_after_dequeue=input_reader_config.min_after_dequeue) label_map_proto_file = None if input_reader_config.HasField('label_map_path'): label_map_proto_file = input_reader_config.label_map_path decoder = tf_example_decoder.TfExampleDecoder( load_instance_masks=input_reader_config.load_instance_masks, instance_mask_type=input_reader_config.mask_type, label_map_proto_file=label_map_proto_file) return decoder.decode(string_tensor) raise ValueError('Unsupported input_reader_config.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/input_reader_builder.py
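A hypothetical usage sketch for input_reader_builder.build above; the record path is a placeholder and a TF 1.x runtime is assumed:

from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.protos import input_reader_pb2

reader_proto = input_reader_pb2.InputReader()
text_format.Merge("""
  shuffle: true
  num_readers: 2
  tf_record_input_reader { input_path: "/data/train.record" }
""", reader_proto)
tensor_dict = input_reader_builder.build(reader_proto)
# tensor_dict maps standard field names (image, groundtruth_boxes, ...)
# to tensors decoded from the TFRecord source.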
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for optimizer_builder.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import optimizer_builder from object_detection.protos import optimizer_pb2 class LearningRateBuilderTest(tf.test.TestCase): def testBuildConstantLearningRate(self): learning_rate_text_proto = """ constant_learning_rate { learning_rate: 0.004 } """ learning_rate_proto = optimizer_pb2.LearningRate() text_format.Merge(learning_rate_text_proto, learning_rate_proto) learning_rate = optimizer_builder._create_learning_rate( learning_rate_proto) self.assertTrue(learning_rate.op.name.endswith('learning_rate')) with self.test_session(): learning_rate_out = learning_rate.eval() self.assertAlmostEqual(learning_rate_out, 0.004) def testBuildExponentialDecayLearningRate(self): learning_rate_text_proto = """ exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 99999 decay_factor: 0.85 staircase: false } """ learning_rate_proto = optimizer_pb2.LearningRate() text_format.Merge(learning_rate_text_proto, learning_rate_proto) learning_rate = optimizer_builder._create_learning_rate( learning_rate_proto) self.assertTrue(learning_rate.op.name.endswith('learning_rate')) self.assertTrue(isinstance(learning_rate, tf.Tensor)) def testBuildManualStepLearningRate(self): learning_rate_text_proto = """ manual_step_learning_rate { initial_learning_rate: 0.002 schedule { step: 100 learning_rate: 0.006 } schedule { step: 90000 learning_rate: 0.00006 } warmup: true } """ learning_rate_proto = optimizer_pb2.LearningRate() text_format.Merge(learning_rate_text_proto, learning_rate_proto) learning_rate = optimizer_builder._create_learning_rate( learning_rate_proto) self.assertTrue(isinstance(learning_rate, tf.Tensor)) def testBuildCosineDecayLearningRate(self): learning_rate_text_proto = """ cosine_decay_learning_rate { learning_rate_base: 0.002 total_steps: 20000 warmup_learning_rate: 0.0001 warmup_steps: 1000 hold_base_rate_steps: 20000 } """ learning_rate_proto = optimizer_pb2.LearningRate() text_format.Merge(learning_rate_text_proto, learning_rate_proto) learning_rate = optimizer_builder._create_learning_rate( learning_rate_proto) self.assertTrue(isinstance(learning_rate, tf.Tensor)) def testRaiseErrorOnEmptyLearningRate(self): learning_rate_text_proto = """ """ learning_rate_proto = optimizer_pb2.LearningRate() text_format.Merge(learning_rate_text_proto, learning_rate_proto) with self.assertRaises(ValueError): optimizer_builder._create_learning_rate(learning_rate_proto) class OptimizerBuilderTest(tf.test.TestCase): def testBuildRMSPropOptimizer(self): optimizer_text_proto = """ rms_prop_optimizer: { learning_rate: { exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 800720 decay_factor: 0.95 } } momentum_optimizer_value: 0.9 decay: 0.9 epsilon: 1.0 } use_moving_average: false 
""" optimizer_proto = optimizer_pb2.Optimizer() text_format.Merge(optimizer_text_proto, optimizer_proto) optimizer, _ = optimizer_builder.build(optimizer_proto) self.assertTrue(isinstance(optimizer, tf.train.RMSPropOptimizer)) def testBuildMomentumOptimizer(self): optimizer_text_proto = """ momentum_optimizer: { learning_rate: { constant_learning_rate { learning_rate: 0.001 } } momentum_optimizer_value: 0.99 } use_moving_average: false """ optimizer_proto = optimizer_pb2.Optimizer() text_format.Merge(optimizer_text_proto, optimizer_proto) optimizer, _ = optimizer_builder.build(optimizer_proto) self.assertTrue(isinstance(optimizer, tf.train.MomentumOptimizer)) def testBuildAdamOptimizer(self): optimizer_text_proto = """ adam_optimizer: { learning_rate: { constant_learning_rate { learning_rate: 0.002 } } } use_moving_average: false """ optimizer_proto = optimizer_pb2.Optimizer() text_format.Merge(optimizer_text_proto, optimizer_proto) optimizer, _ = optimizer_builder.build(optimizer_proto) self.assertTrue(isinstance(optimizer, tf.train.AdamOptimizer)) def testBuildMovingAverageOptimizer(self): optimizer_text_proto = """ adam_optimizer: { learning_rate: { constant_learning_rate { learning_rate: 0.002 } } } use_moving_average: True """ optimizer_proto = optimizer_pb2.Optimizer() text_format.Merge(optimizer_text_proto, optimizer_proto) optimizer, _ = optimizer_builder.build(optimizer_proto) self.assertTrue( isinstance(optimizer, tf.contrib.opt.MovingAverageOptimizer)) def testBuildMovingAverageOptimizerWithNonDefaultDecay(self): optimizer_text_proto = """ adam_optimizer: { learning_rate: { constant_learning_rate { learning_rate: 0.002 } } } use_moving_average: True moving_average_decay: 0.2 """ optimizer_proto = optimizer_pb2.Optimizer() text_format.Merge(optimizer_text_proto, optimizer_proto) optimizer, _ = optimizer_builder.build(optimizer_proto) self.assertTrue( isinstance(optimizer, tf.contrib.opt.MovingAverageOptimizer)) # TODO(rathodv): Find a way to not depend on the private members. self.assertAlmostEqual(optimizer._ema._decay, 0.2) def testBuildEmptyOptimizer(self): optimizer_text_proto = """ """ optimizer_proto = optimizer_pb2.Optimizer() text_format.Merge(optimizer_text_proto, optimizer_proto) with self.assertRaises(ValueError): optimizer_builder.build(optimizer_proto) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/optimizer_builder_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for quantized training and evaluation.""" import tensorflow as tf def build(graph_rewriter_config, is_training): """Returns a function that modifies default graph based on options. Args: graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto. is_training: whether in training or eval mode. """ def graph_rewrite_fn(): """Function to quantize weights and activations of the default graph.""" if (graph_rewriter_config.quantization.weight_bits != 8 or graph_rewriter_config.quantization.activation_bits != 8): raise ValueError('Only 8bit quantization is supported') # Quantize the graph by inserting quantize ops for weights and activations if is_training: tf.contrib.quantize.create_training_graph( input_graph=tf.get_default_graph(), quant_delay=graph_rewriter_config.quantization.delay) else: tf.contrib.quantize.create_eval_graph(input_graph=tf.get_default_graph()) tf.contrib.layers.summarize_collection('quant_vars') return graph_rewrite_fn
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/graph_rewriter_builder.py
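A hypothetical usage sketch for graph_rewriter_builder.build above (TF 1.x with tf.contrib.quantize assumed, as elsewhere in this repository):

from object_detection.builders import graph_rewriter_builder
from object_detection.protos import graph_rewriter_pb2

proto = graph_rewriter_pb2.GraphRewriter()
proto.quantization.delay = 2000      # start quantizing after 2000 steps
proto.quantization.weight_bits = 8   # only 8-bit quantization is supported
proto.quantization.activation_bits = 8

graph_rewrite_fn = graph_rewriter_builder.build(proto, is_training=True)
# Call after the model graph has been built, before creating the train op:
graph_rewrite_fn()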
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build a DetectionModel from configuration."""
import functools

from object_detection.builders import anchor_generator_builder
from object_detection.builders import box_coder_builder
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import losses_builder
from object_detection.builders import matcher_builder
from object_detection.builders import post_processing_builder
from object_detection.builders import region_similarity_calculator_builder as sim_calc
from object_detection.core import balanced_positive_negative_sampler as sampler
from object_detection.core import post_processing
from object_detection.core import target_assigner
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.meta_architectures import rfcn_meta_arch
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res
from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2
from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas
from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas
from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1
from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn
from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn
from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor
from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor
from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor
from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor
from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor
from object_detection.models.ssd_pnasnet_feature_extractor import SSDPNASNetFeatureExtractor
from object_detection.predictors import rfcn_box_predictor
from object_detection.predictors.heads import mask_head
from object_detection.protos import model_pb2
from object_detection.utils import ops

# A map of names to SSD feature extractors.
SSD_FEATURE_EXTRACTOR_CLASS_MAP = {
    'ssd_inception_v2': SSDInceptionV2FeatureExtractor,
    'ssd_inception_v3': SSDInceptionV3FeatureExtractor,
    'ssd_mobilenet_v1': SSDMobileNetV1FeatureExtractor,
    'ssd_mobilenet_v1_fpn': SSDMobileNetV1FpnFeatureExtractor,
    'ssd_mobilenet_v1_ppn': SSDMobileNetV1PpnFeatureExtractor,
    'ssd_mobilenet_v2': SSDMobileNetV2FeatureExtractor,
    'ssd_mobilenet_v2_fpn': SSDMobileNetV2FpnFeatureExtractor,
    'ssd_resnet50_v1_fpn': ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor,
    'ssd_resnet101_v1_fpn': ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor,
    'ssd_resnet152_v1_fpn': ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor,
    'ssd_resnet50_v1_ppn': ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor,
    'ssd_resnet101_v1_ppn': ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor,
    'ssd_resnet152_v1_ppn': ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor,
    'embedded_ssd_mobilenet_v1': EmbeddedSSDMobileNetV1FeatureExtractor,
    'ssd_pnasnet': SSDPNASNetFeatureExtractor,
}

SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP = {
    'ssd_mobilenet_v2_keras': SSDMobileNetV2KerasFeatureExtractor
}

# A map of names to Faster R-CNN feature extractors.
FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP = {
    'faster_rcnn_nas': frcnn_nas.FasterRCNNNASFeatureExtractor,
    'faster_rcnn_pnas': frcnn_pnas.FasterRCNNPNASFeatureExtractor,
    'faster_rcnn_inception_resnet_v2':
        frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor,
    'faster_rcnn_inception_v2':
        frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor,
    'faster_rcnn_resnet50': frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor,
    'faster_rcnn_resnet101': frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor,
    'faster_rcnn_resnet152': frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor,
}


def build(model_config, is_training, add_summaries=True):
  """Builds a DetectionModel based on the model config.

  Args:
    model_config: A model.proto object containing the config for the desired
      DetectionModel.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tensorflow summaries in the model graph.

  Returns:
    DetectionModel based on the config.

  Raises:
    ValueError: On invalid meta architecture or model.
  """
  if not isinstance(model_config, model_pb2.DetectionModel):
    raise ValueError('model_config not of type model_pb2.DetectionModel.')
  meta_architecture = model_config.WhichOneof('model')
  if meta_architecture == 'ssd':
    return _build_ssd_model(model_config.ssd, is_training, add_summaries)
  if meta_architecture == 'faster_rcnn':
    return _build_faster_rcnn_model(model_config.faster_rcnn, is_training,
                                    add_summaries)
  raise ValueError('Unknown meta architecture: {}'.format(meta_architecture))


def _build_ssd_feature_extractor(feature_extractor_config, is_training,
                                 freeze_batchnorm, reuse_weights=None):
  """Builds a ssd_meta_arch.SSDFeatureExtractor based on config.

  Args:
    feature_extractor_config: A SSDFeatureExtractor proto config from ssd.proto.
    is_training: True if this feature extractor is being built for training.
    freeze_batchnorm: Whether to freeze batch norm parameters during training
      or not. When training with a small batch size (e.g. 1), it is desirable
      to freeze batch norm update and use pretrained batch norm params.
    reuse_weights: if the feature extractor should reuse weights.

  Returns:
    ssd_meta_arch.SSDFeatureExtractor based on config.

  Raises:
    ValueError: On invalid feature extractor type.
  """
  feature_type = feature_extractor_config.type
  is_keras_extractor = feature_type in SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP
  depth_multiplier = feature_extractor_config.depth_multiplier
  min_depth = feature_extractor_config.min_depth
  pad_to_multiple = feature_extractor_config.pad_to_multiple
  use_explicit_padding = feature_extractor_config.use_explicit_padding
  use_depthwise = feature_extractor_config.use_depthwise

  if is_keras_extractor:
    conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(
        feature_extractor_config.conv_hyperparams)
  else:
    conv_hyperparams = hyperparams_builder.build(
        feature_extractor_config.conv_hyperparams, is_training)
  override_base_feature_extractor_hyperparams = (
      feature_extractor_config.override_base_feature_extractor_hyperparams)

  if (feature_type not in SSD_FEATURE_EXTRACTOR_CLASS_MAP) and (
      not is_keras_extractor):
    raise ValueError('Unknown ssd feature_extractor: {}'.format(feature_type))

  if is_keras_extractor:
    feature_extractor_class = SSD_KERAS_FEATURE_EXTRACTOR_CLASS_MAP[
        feature_type]
  else:
    feature_extractor_class = SSD_FEATURE_EXTRACTOR_CLASS_MAP[feature_type]
  kwargs = {
      'is_training': is_training,
      'depth_multiplier': depth_multiplier,
      'min_depth': min_depth,
      'pad_to_multiple': pad_to_multiple,
      'use_explicit_padding': use_explicit_padding,
      'use_depthwise': use_depthwise,
      'override_base_feature_extractor_hyperparams':
          override_base_feature_extractor_hyperparams
  }

  if is_keras_extractor:
    kwargs.update({
        'conv_hyperparams': conv_hyperparams,
        'inplace_batchnorm_update': False,
        'freeze_batchnorm': freeze_batchnorm
    })
  else:
    kwargs.update({
        'conv_hyperparams_fn': conv_hyperparams,
        'reuse_weights': reuse_weights,
    })

  if feature_extractor_config.HasField('fpn'):
    kwargs.update({
        'fpn_min_level': feature_extractor_config.fpn.min_level,
        'fpn_max_level': feature_extractor_config.fpn.max_level,
        'additional_layer_depth':
            feature_extractor_config.fpn.additional_layer_depth,
    })

  return feature_extractor_class(**kwargs)


def _build_ssd_model(ssd_config, is_training, add_summaries):
  """Builds an SSD detection model based on the model config.

  Args:
    ssd_config: A ssd.proto object containing the config for the desired
      SSDMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    SSDMetaArch based on the config.

  Raises:
    ValueError: If ssd_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = ssd_config.num_classes

  # Feature extractor
  feature_extractor = _build_ssd_feature_extractor(
      feature_extractor_config=ssd_config.feature_extractor,
      freeze_batchnorm=ssd_config.freeze_batchnorm,
      is_training=is_training)

  box_coder = box_coder_builder.build(ssd_config.box_coder)
  matcher = matcher_builder.build(ssd_config.matcher)
  region_similarity_calculator = sim_calc.build(
      ssd_config.similarity_calculator)
  encode_background_as_zeros = ssd_config.encode_background_as_zeros
  negative_class_weight = ssd_config.negative_class_weight
  anchor_generator = anchor_generator_builder.build(
      ssd_config.anchor_generator)
  if feature_extractor.is_keras_model:
    ssd_box_predictor = box_predictor_builder.build_keras(
        conv_hyperparams_fn=hyperparams_builder.KerasLayerHyperparams,
        freeze_batchnorm=ssd_config.freeze_batchnorm,
        inplace_batchnorm_update=False,
        num_predictions_per_location_list=anchor_generator
        .num_anchors_per_location(),
        box_predictor_config=ssd_config.box_predictor,
        is_training=is_training,
        num_classes=num_classes,
        add_background_class=ssd_config.add_background_class)
  else:
    ssd_box_predictor = box_predictor_builder.build(
        hyperparams_builder.build, ssd_config.box_predictor, is_training,
        num_classes, ssd_config.add_background_class)
  image_resizer_fn = image_resizer_builder.build(ssd_config.image_resizer)
  non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(
      ssd_config.post_processing)
  (classification_loss, localization_loss, classification_weight,
   localization_weight, hard_example_miner, random_example_sampler,
   expected_loss_weights_fn) = losses_builder.build(ssd_config.loss)
  normalize_loss_by_num_matches = ssd_config.normalize_loss_by_num_matches
  normalize_loc_loss_by_codesize = ssd_config.normalize_loc_loss_by_codesize

  equalization_loss_config = ops.EqualizationLossConfig(
      weight=ssd_config.loss.equalization_loss.weight,
      exclude_prefixes=ssd_config.loss.equalization_loss.exclude_prefixes)

  target_assigner_instance = target_assigner.TargetAssigner(
      region_similarity_calculator,
      matcher,
      box_coder,
      negative_class_weight=negative_class_weight)

  ssd_meta_arch_fn = ssd_meta_arch.SSDMetaArch
  kwargs = {}

  return ssd_meta_arch_fn(
      is_training=is_training,
      anchor_generator=anchor_generator,
      box_predictor=ssd_box_predictor,
      box_coder=box_coder,
      feature_extractor=feature_extractor,
      encode_background_as_zeros=encode_background_as_zeros,
      image_resizer_fn=image_resizer_fn,
      non_max_suppression_fn=non_max_suppression_fn,
      score_conversion_fn=score_conversion_fn,
      classification_loss=classification_loss,
      localization_loss=localization_loss,
      classification_loss_weight=classification_weight,
      localization_loss_weight=localization_weight,
      normalize_loss_by_num_matches=normalize_loss_by_num_matches,
      hard_example_miner=hard_example_miner,
      target_assigner_instance=target_assigner_instance,
      add_summaries=add_summaries,
      normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
      freeze_batchnorm=ssd_config.freeze_batchnorm,
      inplace_batchnorm_update=ssd_config.inplace_batchnorm_update,
      add_background_class=ssd_config.add_background_class,
      explicit_background_class=ssd_config.explicit_background_class,
      random_example_sampler=random_example_sampler,
      expected_loss_weights_fn=expected_loss_weights_fn,
      use_confidences_as_targets=ssd_config.use_confidences_as_targets,
      implicit_example_weight=ssd_config.implicit_example_weight,
      equalization_loss_config=equalization_loss_config,
      **kwargs)


def _build_faster_rcnn_feature_extractor(
    feature_extractor_config, is_training, reuse_weights=None,
    inplace_batchnorm_update=False):
  """Builds a faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.

  Args:
    feature_extractor_config: A FasterRcnnFeatureExtractor proto config from
      faster_rcnn.proto.
    is_training: True if this feature extractor is being built for training.
    reuse_weights: if the feature extractor should reuse weights.
    inplace_batchnorm_update: Whether to update batch_norm inplace during
      training. This is required for batch norm to work correctly on TPUs.
      When this is false, user must add a control dependency on
      tf.GraphKeys.UPDATE_OPS for train/loss op in order to update the batch
      norm moving average parameters.

  Returns:
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor based on config.

  Raises:
    ValueError: On invalid feature extractor type.
  """
  if inplace_batchnorm_update:
    raise ValueError('inplace batchnorm updates not supported.')
  feature_type = feature_extractor_config.type
  first_stage_features_stride = (
      feature_extractor_config.first_stage_features_stride)
  batch_norm_trainable = feature_extractor_config.batch_norm_trainable

  if feature_type not in FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP:
    raise ValueError('Unknown Faster R-CNN feature_extractor: {}'.format(
        feature_type))
  feature_extractor_class = FASTER_RCNN_FEATURE_EXTRACTOR_CLASS_MAP[
      feature_type]
  return feature_extractor_class(
      is_training, first_stage_features_stride,
      batch_norm_trainable, reuse_weights)


def _build_faster_rcnn_model(frcnn_config, is_training, add_summaries):
  """Builds a Faster R-CNN or R-FCN detection model based on the model config.

  Builds R-FCN model if the second_stage_box_predictor in the config is of
  type `rfcn_box_predictor` else builds a Faster R-CNN model.

  Args:
    frcnn_config: A faster_rcnn.proto object containing the config for the
      desired FasterRCNNMetaArch or RFCNMetaArch.
    is_training: True if this model is being built for training purposes.
    add_summaries: Whether to add tf summaries in the model.

  Returns:
    FasterRCNNMetaArch based on the config.

  Raises:
    ValueError: If frcnn_config.type is not recognized (i.e. not registered in
      model_class_map).
  """
  num_classes = frcnn_config.num_classes
  image_resizer_fn = image_resizer_builder.build(frcnn_config.image_resizer)

  feature_extractor = _build_faster_rcnn_feature_extractor(
      frcnn_config.feature_extractor, is_training,
      inplace_batchnorm_update=frcnn_config.inplace_batchnorm_update)

  number_of_stages = frcnn_config.number_of_stages
  first_stage_anchor_generator = anchor_generator_builder.build(
      frcnn_config.first_stage_anchor_generator)

  first_stage_target_assigner = target_assigner.create_target_assigner(
      'FasterRCNN',
      'proposal',
      use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
  first_stage_atrous_rate = frcnn_config.first_stage_atrous_rate
  first_stage_box_predictor_arg_scope_fn = hyperparams_builder.build(
      frcnn_config.first_stage_box_predictor_conv_hyperparams, is_training)
  first_stage_box_predictor_kernel_size = (
      frcnn_config.first_stage_box_predictor_kernel_size)
  first_stage_box_predictor_depth = frcnn_config.first_stage_box_predictor_depth
  first_stage_minibatch_size = frcnn_config.first_stage_minibatch_size
  use_static_shapes = frcnn_config.use_static_shapes and (
      frcnn_config.use_static_shapes_for_eval or is_training)
  first_stage_sampler = sampler.BalancedPositiveNegativeSampler(
      positive_fraction=frcnn_config.first_stage_positive_balance_fraction,
      is_static=(frcnn_config.use_static_balanced_label_sampler and
                 use_static_shapes))
  first_stage_max_proposals = frcnn_config.first_stage_max_proposals
  if (frcnn_config.first_stage_nms_iou_threshold < 0 or
      frcnn_config.first_stage_nms_iou_threshold > 1.0):
    raise ValueError('iou_threshold not in [0, 1.0].')
  if (is_training and frcnn_config.second_stage_batch_size >
      first_stage_max_proposals):
    raise ValueError('second_stage_batch_size should be no greater than '
                     'first_stage_max_proposals.')
  first_stage_non_max_suppression_fn = functools.partial(
      post_processing.batch_multiclass_non_max_suppression,
      score_thresh=frcnn_config.first_stage_nms_score_threshold,
      iou_thresh=frcnn_config.first_stage_nms_iou_threshold,
      max_size_per_class=frcnn_config.first_stage_max_proposals,
      max_total_size=frcnn_config.first_stage_max_proposals,
      use_static_shapes=use_static_shapes)
  first_stage_loc_loss_weight = (
      frcnn_config.first_stage_localization_loss_weight)
  first_stage_obj_loss_weight = frcnn_config.first_stage_objectness_loss_weight

  initial_crop_size = frcnn_config.initial_crop_size
  maxpool_kernel_size = frcnn_config.maxpool_kernel_size
  maxpool_stride = frcnn_config.maxpool_stride

  second_stage_target_assigner = target_assigner.create_target_assigner(
      'FasterRCNN',
      'detection',
      use_matmul_gather=frcnn_config.use_matmul_gather_in_matcher)
  second_stage_box_predictor = box_predictor_builder.build(
      hyperparams_builder.build,
      frcnn_config.second_stage_box_predictor,
      is_training=is_training,
      num_classes=num_classes)
  second_stage_batch_size = frcnn_config.second_stage_batch_size
  second_stage_sampler = sampler.BalancedPositiveNegativeSampler(
      positive_fraction=frcnn_config.second_stage_balance_fraction,
      is_static=(frcnn_config.use_static_balanced_label_sampler and
                 use_static_shapes))
  (second_stage_non_max_suppression_fn, second_stage_score_conversion_fn
  ) = post_processing_builder.build(frcnn_config.second_stage_post_processing)
  second_stage_localization_loss_weight = (
      frcnn_config.second_stage_localization_loss_weight)
  second_stage_classification_loss = (
      losses_builder.build_faster_rcnn_classification_loss(
          frcnn_config.second_stage_classification_loss))
  second_stage_classification_loss_weight = (
      frcnn_config.second_stage_classification_loss_weight)
  second_stage_mask_prediction_loss_weight = (
      frcnn_config.second_stage_mask_prediction_loss_weight)

  hard_example_miner = None
  if frcnn_config.HasField('hard_example_miner'):
    hard_example_miner = losses_builder.build_hard_example_miner(
        frcnn_config.hard_example_miner,
        second_stage_classification_loss_weight,
        second_stage_localization_loss_weight)

  crop_and_resize_fn = (
      ops.matmul_crop_and_resize
      if frcnn_config.use_matmul_crop_and_resize
      else ops.native_crop_and_resize)
  clip_anchors_to_image = (
      frcnn_config.clip_anchors_to_image)

  common_kwargs = {
      'is_training': is_training,
      'num_classes': num_classes,
      'image_resizer_fn': image_resizer_fn,
      'feature_extractor': feature_extractor,
      'number_of_stages': number_of_stages,
      'first_stage_anchor_generator': first_stage_anchor_generator,
      'first_stage_target_assigner': first_stage_target_assigner,
      'first_stage_atrous_rate': first_stage_atrous_rate,
      'first_stage_box_predictor_arg_scope_fn':
          first_stage_box_predictor_arg_scope_fn,
      'first_stage_box_predictor_kernel_size':
          first_stage_box_predictor_kernel_size,
      'first_stage_box_predictor_depth': first_stage_box_predictor_depth,
      'first_stage_minibatch_size': first_stage_minibatch_size,
      'first_stage_sampler': first_stage_sampler,
      'first_stage_non_max_suppression_fn': first_stage_non_max_suppression_fn,
      'first_stage_max_proposals': first_stage_max_proposals,
      'first_stage_localization_loss_weight': first_stage_loc_loss_weight,
      'first_stage_objectness_loss_weight': first_stage_obj_loss_weight,
      'second_stage_target_assigner': second_stage_target_assigner,
      'second_stage_batch_size': second_stage_batch_size,
      'second_stage_sampler': second_stage_sampler,
      'second_stage_non_max_suppression_fn':
          second_stage_non_max_suppression_fn,
      'second_stage_score_conversion_fn': second_stage_score_conversion_fn,
      'second_stage_localization_loss_weight':
          second_stage_localization_loss_weight,
      'second_stage_classification_loss': second_stage_classification_loss,
      'second_stage_classification_loss_weight':
          second_stage_classification_loss_weight,
      'hard_example_miner': hard_example_miner,
      'add_summaries': add_summaries,
      'crop_and_resize_fn': crop_and_resize_fn,
      'clip_anchors_to_image': clip_anchors_to_image,
      'use_static_shapes': use_static_shapes,
      'resize_masks': frcnn_config.resize_masks
  }

  if isinstance(second_stage_box_predictor,
                rfcn_box_predictor.RfcnBoxPredictor):
    return rfcn_meta_arch.RFCNMetaArch(
        second_stage_rfcn_box_predictor=second_stage_box_predictor,
        **common_kwargs)
  else:
    return faster_rcnn_meta_arch.FasterRCNNMetaArch(
        initial_crop_size=initial_crop_size,
        maxpool_kernel_size=maxpool_kernel_size,
        maxpool_stride=maxpool_stride,
        second_stage_mask_rcnn_box_predictor=second_stage_box_predictor,
        second_stage_mask_prediction_loss_weight=(
            second_stage_mask_prediction_loss_weight),
        **common_kwargs)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/model_builder.py
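The builder above is driven entirely by a text-format DetectionModel proto. Below is a minimal usage sketch, assuming a config shaped like the SSD configs exercised by model_builder's tests later in this listing; the 'ssd_inception_v2' type and num_classes value are illustrative choices, not a canonical minimal config.

# Usage sketch for model_builder.build (assumption: this config mirrors the
# SSD test configs in this repo; it is illustrative, not canonical).
from google.protobuf import text_format

from object_detection.builders import model_builder
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.protos import model_pb2

MODEL_TEXT_PROTO = """
  ssd {
    num_classes: 3
    feature_extractor {
      type: 'ssd_inception_v2'
      conv_hyperparams {
        regularizer { l2_regularizer { } }
        initializer { truncated_normal_initializer { } }
      }
      override_base_feature_extractor_hyperparams: true
    }
    box_coder { faster_rcnn_box_coder { } }
    matcher { argmax_matcher { } }
    similarity_calculator { iou_similarity { } }
    anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } }
    image_resizer { fixed_shape_resizer { height: 320 width: 320 } }
    box_predictor {
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer { l2_regularizer { } }
          initializer { truncated_normal_initializer { } }
        }
      }
    }
    loss {
      classification_loss { weighted_softmax { } }
      localization_loss { weighted_smooth_l1 { } }
    }
  }
"""

model_config = model_pb2.DetectionModel()
text_format.Merge(MODEL_TEXT_PROTO, model_config)
# build() validates the proto type, reads the 'model' oneof, and dispatches to
# _build_ssd_model; the result is a fully wired SSDMetaArch.
detection_model = model_builder.build(model_config, is_training=True)
assert isinstance(detection_model, ssd_meta_arch.SSDMetaArch)

Because build() only dispatches on the `model` oneof, the same call works unchanged for a `faster_rcnn { ... }` config.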
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for box_predictor_builder."""

import mock
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.protos import box_predictor_pb2
from object_detection.protos import hyperparams_pb2


class ConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):

  def test_box_predictor_calls_conv_argscope_fn(self):
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { weight: 0.0003 } }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
    self.assertAlmostEqual(
        (hyperparams_proto.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.
         truncated_normal_initializer.stddev))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.
         truncated_normal_initializer.mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_construct_non_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      convolutional_box_predictor {
        min_depth: 2
        max_depth: 16
        num_layers_before_predictor: 2
        use_dropout: false
        dropout_keep_probability: 0.4
        kernel_size: 3
        box_code_size: 3
        apply_sigmoid_to_scores: true
        class_prediction_bias_init: 4.0
        use_depthwise: true
      }
    """
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.convolutional_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10,
        add_background_class=False)
    class_head = box_predictor._class_prediction_head
    self.assertEqual(box_predictor._min_depth, 2)
    self.assertEqual(box_predictor._max_depth, 16)
    self.assertEqual(box_predictor._num_layers_before_predictor, 2)
    self.assertFalse(class_head._use_dropout)
    self.assertAlmostEqual(class_head._dropout_keep_prob, 0.4)
    self.assertTrue(class_head._apply_sigmoid_to_scores)
    self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
    self.assertEqual(class_head._num_class_slots, 10)
    self.assertEqual(box_predictor.num_classes, 10)
    self.assertFalse(box_predictor._is_training)
    self.assertTrue(class_head._use_depthwise)

  def test_construct_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      convolutional_box_predictor {
        conv_hyperparams {
          regularizer { l1_regularizer { } }
          initializer { truncated_normal_initializer { } }
        }
      }"""
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=hyperparams_builder.build,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    class_head = box_predictor._class_prediction_head
    self.assertEqual(box_predictor._min_depth, 0)
    self.assertEqual(box_predictor._max_depth, 0)
    self.assertEqual(box_predictor._num_layers_before_predictor, 0)
    self.assertTrue(class_head._use_dropout)
    self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
    self.assertFalse(class_head._apply_sigmoid_to_scores)
    self.assertEqual(class_head._num_class_slots, 91)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertFalse(class_head._use_depthwise)


class WeightSharedConvolutionalBoxPredictorBuilderTest(tf.test.TestCase):

  def test_box_predictor_calls_conv_argscope_fn(self):
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { weight: 0.0003 } }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    (box_predictor_proto.weight_shared_convolutional_box_predictor
     .conv_hyperparams.CopyFrom(hyperparams_proto))
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
    self.assertAlmostEqual(
        (hyperparams_proto.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.
         truncated_normal_initializer.stddev))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.
         truncated_normal_initializer.mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_construct_non_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      weight_shared_convolutional_box_predictor {
        depth: 2
        num_layers_before_predictor: 2
        kernel_size: 7
        box_code_size: 3
        class_prediction_bias_init: 4.0
      }
    """
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    (box_predictor_proto.weight_shared_convolutional_box_predictor.
     conv_hyperparams.CopyFrom(hyperparams_proto))
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10,
        add_background_class=False)
    class_head = box_predictor._class_prediction_head
    self.assertEqual(box_predictor._depth, 2)
    self.assertEqual(box_predictor._num_layers_before_predictor, 2)
    self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
    self.assertEqual(box_predictor.num_classes, 10)
    self.assertFalse(box_predictor._is_training)
    self.assertEqual(box_predictor._apply_batch_norm, False)

  def test_construct_non_default_depthwise_conv_box_predictor(self):
    box_predictor_text_proto = """
      weight_shared_convolutional_box_predictor {
        depth: 2
        num_layers_before_predictor: 2
        kernel_size: 7
        box_code_size: 3
        class_prediction_bias_init: 4.0
        use_depthwise: true
      }
    """
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    (box_predictor_proto.weight_shared_convolutional_box_predictor.
     conv_hyperparams.CopyFrom(hyperparams_proto))
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10,
        add_background_class=False)
    class_head = box_predictor._class_prediction_head
    self.assertEqual(box_predictor._depth, 2)
    self.assertEqual(box_predictor._num_layers_before_predictor, 2)
    self.assertEqual(box_predictor._apply_batch_norm, False)
    self.assertEqual(box_predictor._use_depthwise, True)
    self.assertAlmostEqual(class_head._class_prediction_bias_init, 4.0)
    self.assertEqual(box_predictor.num_classes, 10)
    self.assertFalse(box_predictor._is_training)

  def test_construct_default_conv_box_predictor(self):
    box_predictor_text_proto = """
      weight_shared_convolutional_box_predictor {
        conv_hyperparams {
          regularizer { l1_regularizer { } }
          initializer { truncated_normal_initializer { } }
        }
      }"""
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=hyperparams_builder.build,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor._depth, 0)
    self.assertEqual(box_predictor._num_layers_before_predictor, 0)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._apply_batch_norm, False)

  def test_construct_default_conv_box_predictor_with_batch_norm(self):
    box_predictor_text_proto = """
      weight_shared_convolutional_box_predictor {
        conv_hyperparams {
          regularizer { l1_regularizer { } }
          batch_norm { train: true }
          initializer { truncated_normal_initializer { } }
        }
      }"""
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=hyperparams_builder.build,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor._depth, 0)
    self.assertEqual(box_predictor._num_layers_before_predictor, 0)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._apply_batch_norm, True)


class MaskRCNNBoxPredictorBuilderTest(tf.test.TestCase):

  def test_box_predictor_builder_calls_fc_argscope_fn(self):
    fc_hyperparams_text_proto = """
      regularizer { l1_regularizer { weight: 0.0003 } }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
      op: FC
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    mock_argscope_fn.assert_called_with(hyperparams_proto, False)
    self.assertEqual(box_predictor._box_prediction_head._fc_hyperparams_fn,
                     'arg_scope')
    self.assertEqual(box_predictor._class_prediction_head._fc_hyperparams_fn,
                     'arg_scope')

  def test_non_default_mask_rcnn_box_predictor(self):
    fc_hyperparams_text_proto = """
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
      activation: RELU_6
      op: FC
    """
    box_predictor_text_proto = """
      mask_rcnn_box_predictor {
        use_dropout: true
        dropout_keep_probability: 0.8
        box_code_size: 3
        share_box_across_classes: true
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(fc_hyperparams_text_proto, hyperparams_proto)

    def mock_fc_argscope_builder(fc_hyperparams_arg, is_training):
      return (fc_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_fc_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    box_head = box_predictor._box_prediction_head
    class_head = box_predictor._class_prediction_head
    self.assertTrue(box_head._use_dropout)
    self.assertTrue(class_head._use_dropout)
    self.assertAlmostEqual(box_head._dropout_keep_prob, 0.8)
    self.assertAlmostEqual(class_head._dropout_keep_prob, 0.8)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_head._box_code_size, 3)
    self.assertEqual(box_head._share_box_across_classes, True)

  def test_build_default_mask_rcnn_box_predictor(self):
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock.Mock(return_value='arg_scope'),
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    box_head = box_predictor._box_prediction_head
    class_head = box_predictor._class_prediction_head
    self.assertFalse(box_head._use_dropout)
    self.assertFalse(class_head._use_dropout)
    self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_head._box_code_size, 4)
    self.assertEqual(len(box_predictor._third_stage_heads.keys()), 0)

  def test_build_box_predictor_with_mask_branch(self):
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
        hyperparams_pb2.Hyperparams.CONV)
    box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
    box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
    box_predictor_proto.mask_rcnn_box_predictor.mask_height = 16
    box_predictor_proto.mask_rcnn_box_predictor.mask_width = 16
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    mock_argscope_fn.assert_has_calls(
        [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
                   True),
         mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
                   True)], any_order=True)
    box_head = box_predictor._box_prediction_head
    class_head = box_predictor._class_prediction_head
    third_stage_heads = box_predictor._third_stage_heads
    self.assertFalse(box_head._use_dropout)
    self.assertFalse(class_head._use_dropout)
    self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
    self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_head._box_code_size, 4)
    self.assertTrue(
        mask_rcnn_box_predictor.MASK_PREDICTIONS in third_stage_heads)
    self.assertEqual(
        third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
        ._mask_prediction_conv_depth, 512)

  def test_build_box_predictor_with_convolve_then_upsample_masks(self):
    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams.op = (
        hyperparams_pb2.Hyperparams.FC)
    box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams.op = (
        hyperparams_pb2.Hyperparams.CONV)
    box_predictor_proto.mask_rcnn_box_predictor.predict_instance_masks = True
    box_predictor_proto.mask_rcnn_box_predictor.mask_prediction_conv_depth = 512
    box_predictor_proto.mask_rcnn_box_predictor.mask_height = 24
    box_predictor_proto.mask_rcnn_box_predictor.mask_width = 24
    box_predictor_proto.mask_rcnn_box_predictor.convolve_then_upsample_masks = (
        True)
    mock_argscope_fn = mock.Mock(return_value='arg_scope')
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_argscope_fn,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    mock_argscope_fn.assert_has_calls(
        [mock.call(box_predictor_proto.mask_rcnn_box_predictor.fc_hyperparams,
                   True),
         mock.call(box_predictor_proto.mask_rcnn_box_predictor.conv_hyperparams,
                   True)], any_order=True)
    box_head = box_predictor._box_prediction_head
    class_head = box_predictor._class_prediction_head
    third_stage_heads = box_predictor._third_stage_heads
    self.assertFalse(box_head._use_dropout)
    self.assertFalse(class_head._use_dropout)
    self.assertAlmostEqual(box_head._dropout_keep_prob, 0.5)
    self.assertAlmostEqual(class_head._dropout_keep_prob, 0.5)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_head._box_code_size, 4)
    self.assertTrue(
        mask_rcnn_box_predictor.MASK_PREDICTIONS in third_stage_heads)
    self.assertEqual(
        third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
        ._mask_prediction_conv_depth, 512)
    self.assertTrue(third_stage_heads[mask_rcnn_box_predictor.MASK_PREDICTIONS]
                    ._convolve_then_upsample)


class RfcnBoxPredictorBuilderTest(tf.test.TestCase):

  def test_box_predictor_calls_fc_argscope_fn(self):
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { weight: 0.0003 } }
      initializer {
        truncated_normal_initializer {
          mean: 0.0
          stddev: 0.3
        }
      }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=False,
        num_classes=10)
    (conv_hyperparams_actual, is_training) = box_predictor._conv_hyperparams_fn
    self.assertAlmostEqual(
        (hyperparams_proto.regularizer.l1_regularizer.weight),
        (conv_hyperparams_actual.regularizer.l1_regularizer.weight))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.stddev),
        (conv_hyperparams_actual.initializer.
         truncated_normal_initializer.stddev))
    self.assertAlmostEqual(
        (hyperparams_proto.initializer.truncated_normal_initializer.mean),
        (conv_hyperparams_actual.initializer.
         truncated_normal_initializer.mean))
    self.assertEqual(hyperparams_proto.activation,
                     conv_hyperparams_actual.activation)
    self.assertFalse(is_training)

  def test_non_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
      activation: RELU_6
    """
    box_predictor_text_proto = """
      rfcn_box_predictor {
        num_spatial_bins_height: 4
        num_spatial_bins_width: 4
        depth: 4
        box_code_size: 3
        crop_height: 16
        crop_width: 16
      }
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    text_format.Merge(box_predictor_text_proto, box_predictor_proto)
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 3)
    self.assertEqual(box_predictor._num_spatial_bins, [4, 4])
    self.assertEqual(box_predictor._crop_size, [16, 16])

  def test_default_rfcn_box_predictor(self):
    conv_hyperparams_text_proto = """
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
      activation: RELU_6
    """
    hyperparams_proto = hyperparams_pb2.Hyperparams()
    text_format.Merge(conv_hyperparams_text_proto, hyperparams_proto)

    def mock_conv_argscope_builder(conv_hyperparams_arg, is_training):
      return (conv_hyperparams_arg, is_training)

    box_predictor_proto = box_predictor_pb2.BoxPredictor()
    box_predictor_proto.rfcn_box_predictor.conv_hyperparams.CopyFrom(
        hyperparams_proto)
    box_predictor = box_predictor_builder.build(
        argscope_fn=mock_conv_argscope_builder,
        box_predictor_config=box_predictor_proto,
        is_training=True,
        num_classes=90)
    self.assertEqual(box_predictor.num_classes, 90)
    self.assertTrue(box_predictor._is_training)
    self.assertEqual(box_predictor._box_code_size, 4)
    self.assertEqual(box_predictor._num_spatial_bins, [3, 3])
    self.assertEqual(box_predictor._crop_size, [12, 12])


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/box_predictor_builder_test.py
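The tests above all funnel through box_predictor_builder.build with an argscope_fn. Below is a compact usage sketch outside a test harness, assuming the real hyperparams_builder.build as the argscope function; the config values are illustrative.

# Usage sketch for box_predictor_builder.build (assumption: illustrative
# config values; the call pattern mirrors the tests above).
from google.protobuf import text_format

from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2

box_predictor_text_proto = """
  convolutional_box_predictor {
    min_depth: 2
    max_depth: 16
    num_layers_before_predictor: 2
    conv_hyperparams {
      regularizer { l1_regularizer { } }
      initializer { truncated_normal_initializer { } }
    }
  }
"""
box_predictor_proto = box_predictor_pb2.BoxPredictor()
text_format.Merge(box_predictor_text_proto, box_predictor_proto)

# argscope_fn receives the hyperparams proto and is_training, exactly as the
# mock builders in the tests do; hyperparams_builder.build is the real
# implementation used in production code paths.
box_predictor = box_predictor_builder.build(
    argscope_fn=hyperparams_builder.build,
    box_predictor_config=box_predictor_proto,
    is_training=True,
    num_classes=90)
print(box_predictor.num_classes)  # -> 90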
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection matcher from configuration."""

from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.protos import matcher_pb2


def build(matcher_config):
  """Builds a matcher object based on the matcher config.

  Args:
    matcher_config: A matcher.proto object containing the config for the
      desired Matcher.

  Returns:
    Matcher based on the config.

  Raises:
    ValueError: On empty matcher proto.
  """
  if not isinstance(matcher_config, matcher_pb2.Matcher):
    raise ValueError('matcher_config not of type matcher_pb2.Matcher.')
  if matcher_config.WhichOneof('matcher_oneof') == 'argmax_matcher':
    matcher = matcher_config.argmax_matcher
    matched_threshold = unmatched_threshold = None
    if not matcher.ignore_thresholds:
      matched_threshold = matcher.matched_threshold
      unmatched_threshold = matcher.unmatched_threshold
    return argmax_matcher.ArgMaxMatcher(
        matched_threshold=matched_threshold,
        unmatched_threshold=unmatched_threshold,
        negatives_lower_than_unmatched=matcher.negatives_lower_than_unmatched,
        force_match_for_each_row=matcher.force_match_for_each_row,
        use_matmul_gather=matcher.use_matmul_gather)
  if matcher_config.WhichOneof('matcher_oneof') == 'bipartite_matcher':
    matcher = matcher_config.bipartite_matcher
    return bipartite_matcher.GreedyBipartiteMatcher(matcher.use_matmul_gather)
  raise ValueError('Empty matcher.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/matcher_builder.py
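A matching usage sketch for the matcher builder; the threshold values below are illustrative, not recommendations.

# Usage sketch for matcher_builder.build (assumption: illustrative thresholds).
from google.protobuf import text_format

from object_detection.builders import matcher_builder
from object_detection.matchers import argmax_matcher
from object_detection.protos import matcher_pb2

matcher_text_proto = """
  argmax_matcher {
    matched_threshold: 0.5
    unmatched_threshold: 0.5
  }
"""
matcher_config = matcher_pb2.Matcher()
text_format.Merge(matcher_text_proto, matcher_config)

# The builder dispatches on the matcher_oneof: an argmax_matcher config yields
# an ArgMaxMatcher, a bipartite_matcher config a GreedyBipartiteMatcher.
matcher = matcher_builder.build(matcher_config)
assert isinstance(matcher, argmax_matcher.ArgMaxMatcher)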
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.models.model_builder.""" from absl.testing import parameterized import tensorflow as tf from google.protobuf import text_format from object_detection.builders import model_builder from object_detection.meta_architectures import faster_rcnn_meta_arch from object_detection.meta_architectures import rfcn_meta_arch from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res from object_detection.models import faster_rcnn_inception_v2_feature_extractor as frcnn_inc_v2 from object_detection.models import faster_rcnn_nas_feature_extractor as frcnn_nas from object_detection.models import faster_rcnn_pnas_feature_extractor as frcnn_pnas from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as frcnn_resnet_v1 from object_detection.models import ssd_resnet_v1_fpn_feature_extractor as ssd_resnet_v1_fpn from object_detection.models import ssd_resnet_v1_ppn_feature_extractor as ssd_resnet_v1_ppn from object_detection.models.embedded_ssd_mobilenet_v1_feature_extractor import EmbeddedSSDMobileNetV1FeatureExtractor from object_detection.models.ssd_inception_v2_feature_extractor import SSDInceptionV2FeatureExtractor from object_detection.models.ssd_inception_v3_feature_extractor import SSDInceptionV3FeatureExtractor from object_detection.models.ssd_mobilenet_v1_feature_extractor import SSDMobileNetV1FeatureExtractor from object_detection.models.ssd_mobilenet_v1_fpn_feature_extractor import SSDMobileNetV1FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v1_ppn_feature_extractor import SSDMobileNetV1PpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_feature_extractor import SSDMobileNetV2FeatureExtractor from object_detection.models.ssd_mobilenet_v2_fpn_feature_extractor import SSDMobileNetV2FpnFeatureExtractor from object_detection.models.ssd_mobilenet_v2_keras_feature_extractor import SSDMobileNetV2KerasFeatureExtractor from object_detection.predictors import convolutional_box_predictor from object_detection.predictors import convolutional_keras_box_predictor from object_detection.protos import model_pb2 FRCNN_RESNET_FEAT_MAPS = { 'faster_rcnn_resnet50': frcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, 'faster_rcnn_resnet101': frcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, 'faster_rcnn_resnet152': frcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor } SSD_RESNET_V1_FPN_FEAT_MAPS = { 'ssd_resnet50_v1_fpn': ssd_resnet_v1_fpn.SSDResnet50V1FpnFeatureExtractor, 'ssd_resnet101_v1_fpn': ssd_resnet_v1_fpn.SSDResnet101V1FpnFeatureExtractor, 'ssd_resnet152_v1_fpn': ssd_resnet_v1_fpn.SSDResnet152V1FpnFeatureExtractor, } SSD_RESNET_V1_PPN_FEAT_MAPS = { 'ssd_resnet50_v1_ppn': ssd_resnet_v1_ppn.SSDResnet50V1PpnFeatureExtractor, 'ssd_resnet101_v1_ppn': 
ssd_resnet_v1_ppn.SSDResnet101V1PpnFeatureExtractor, 'ssd_resnet152_v1_ppn': ssd_resnet_v1_ppn.SSDResnet152V1PpnFeatureExtractor } class ModelBuilderTest(tf.test.TestCase, parameterized.TestCase): def create_model(self, model_config): """Builds a DetectionModel based on the model config. Args: model_config: A model.proto object containing the config for the desired DetectionModel. Returns: DetectionModel based on the config. """ return model_builder.build(model_config, is_training=True) def test_create_ssd_inception_v2_model_from_config(self): model_text_proto = """ ssd { feature_extractor { type: 'ssd_inception_v2' conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } override_base_feature_extractor_hyperparams: true } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDInceptionV2FeatureExtractor) self.assertIsNone(model._expected_loss_weights_fn) def test_create_ssd_inception_v3_model_from_config(self): model_text_proto = """ ssd { feature_extractor { type: 'ssd_inception_v3' conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } override_base_feature_extractor_hyperparams: true } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDInceptionV3FeatureExtractor) def test_create_ssd_resnet_v1_fpn_model_from_config(self): model_text_proto = """ ssd { feature_extractor { type: 'ssd_resnet50_v1_fpn' fpn { min_level: 3 max_level: 7 } conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } encode_background_as_zeros: true anchor_generator { multiscale_anchor_generator { aspect_ratios: [1.0, 2.0, 0.5] scales_per_octave: 2 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { weight_shared_convolutional_box_predictor { depth: 32 conv_hyperparams { regularizer { l2_regularizer { } } initializer { random_normal_initializer { } } } num_layers_before_predictor: 1 } } normalize_loss_by_num_matches: true normalize_loc_loss_by_codesize: true loss { 
classification_loss { weighted_sigmoid_focal { alpha: 0.25 gamma: 2.0 } } localization_loss { weighted_smooth_l1 { delta: 0.1 } } classification_weight: 1.0 localization_weight: 1.0 } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) for extractor_type, extractor_class in SSD_RESNET_V1_FPN_FEAT_MAPS.items(): model_proto.ssd.feature_extractor.type = extractor_type model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, extractor_class) def test_create_ssd_resnet_v1_ppn_model_from_config(self): model_text_proto = """ ssd { feature_extractor { type: 'ssd_resnet_v1_50_ppn' conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { mean_stddev_box_coder { } } matcher { bipartite_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { weight_shared_convolutional_box_predictor { depth: 1024 class_prediction_bias_init: -4.6 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.0004 } } initializer { variance_scaling_initializer { } } } num_layers_before_predictor: 2 kernel_size: 1 } } loss { classification_loss { weighted_softmax { } } localization_loss { weighted_l2 { } } classification_weight: 1.0 localization_weight: 1.0 } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) for extractor_type, extractor_class in SSD_RESNET_V1_PPN_FEAT_MAPS.items(): model_proto.ssd.feature_extractor.type = extractor_type model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, extractor_class) def test_create_ssd_mobilenet_v1_model_from_config(self): model_text_proto = """ ssd { freeze_batchnorm: true inplace_batchnorm_update: true feature_extractor { type: 'ssd_mobilenet_v1' conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } normalize_loc_loss_by_codesize: true loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDMobileNetV1FeatureExtractor) self.assertTrue(model._normalize_loc_loss_by_codesize) self.assertTrue(model._freeze_batchnorm) self.assertTrue(model._inplace_batchnorm_update) def test_create_ssd_mobilenet_v1_fpn_model_from_config(self): model_text_proto = """ ssd { freeze_batchnorm: true inplace_batchnorm_update: true feature_extractor { type: 'ssd_mobilenet_v1_fpn' fpn { min_level: 3 max_level: 7 } conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } 
matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } normalize_loc_loss_by_codesize: true loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDMobileNetV1FpnFeatureExtractor) self.assertTrue(model._normalize_loc_loss_by_codesize) self.assertTrue(model._freeze_batchnorm) self.assertTrue(model._inplace_batchnorm_update) def test_create_ssd_mobilenet_v1_ppn_model_from_config(self): model_text_proto = """ ssd { freeze_batchnorm: true inplace_batchnorm_update: true feature_extractor { type: 'ssd_mobilenet_v1_ppn' conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } normalize_loc_loss_by_codesize: true loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDMobileNetV1PpnFeatureExtractor) self.assertTrue(model._normalize_loc_loss_by_codesize) self.assertTrue(model._freeze_batchnorm) self.assertTrue(model._inplace_batchnorm_update) def test_create_ssd_mobilenet_v2_model_from_config(self): model_text_proto = """ ssd { feature_extractor { type: 'ssd_mobilenet_v2' conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } normalize_loc_loss_by_codesize: true loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDMobileNetV2FeatureExtractor) self.assertIsInstance(model._box_predictor, convolutional_box_predictor.ConvolutionalBoxPredictor) self.assertTrue(model._normalize_loc_loss_by_codesize) def test_create_ssd_mobilenet_v2_keras_model_from_config(self): model_text_proto = """ ssd { feature_extractor { type: 'ssd_mobilenet_v2_keras' conv_hyperparams { regularizer { 
l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } normalize_loc_loss_by_codesize: true loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDMobileNetV2KerasFeatureExtractor) self.assertIsInstance( model._box_predictor, convolutional_keras_box_predictor.ConvolutionalBoxPredictor) self.assertTrue(model._normalize_loc_loss_by_codesize) def test_create_ssd_mobilenet_v2_fpn_model_from_config(self): model_text_proto = """ ssd { freeze_batchnorm: true inplace_batchnorm_update: true feature_extractor { type: 'ssd_mobilenet_v2_fpn' fpn { min_level: 3 max_level: 7 } conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } normalize_loc_loss_by_codesize: true loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDMobileNetV2FpnFeatureExtractor) self.assertTrue(model._normalize_loc_loss_by_codesize) self.assertTrue(model._freeze_batchnorm) self.assertTrue(model._inplace_batchnorm_update) def test_create_ssd_mobilenet_v2_fpnlite_model_from_config(self): model_text_proto = """ ssd { freeze_batchnorm: true inplace_batchnorm_update: true feature_extractor { type: 'ssd_mobilenet_v2_fpn' use_depthwise: true fpn { min_level: 3 max_level: 7 additional_layer_depth: 128 } conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 320 width: 320 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } normalize_loc_loss_by_codesize: true loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, SSDMobileNetV2FpnFeatureExtractor) 
self.assertTrue(model._normalize_loc_loss_by_codesize) self.assertTrue(model._freeze_batchnorm) self.assertTrue(model._inplace_batchnorm_update) def test_create_embedded_ssd_mobilenet_v1_model_from_config(self): model_text_proto = """ ssd { feature_extractor { type: 'embedded_ssd_mobilenet_v1' conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } box_coder { faster_rcnn_box_coder { } } matcher { argmax_matcher { } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { aspect_ratios: 1.0 } } image_resizer { fixed_shape_resizer { height: 256 width: 256 } } box_predictor { convolutional_box_predictor { conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } loss { classification_loss { weighted_softmax { } } localization_loss { weighted_smooth_l1 { } } } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = self.create_model(model_proto) self.assertIsInstance(model, ssd_meta_arch.SSDMetaArch) self.assertIsInstance(model._feature_extractor, EmbeddedSSDMobileNetV1FeatureExtractor) def test_create_faster_rcnn_resnet_v1_models_from_config(self): model_text_proto = """ faster_rcnn { inplace_batchnorm_update: false num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_resnet101' } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { mask_rcnn_box_predictor { fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) for extractor_type, extractor_class in FRCNN_RESNET_FEAT_MAPS.items(): model_proto.faster_rcnn.feature_extractor.type = extractor_type model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) self.assertIsInstance(model._feature_extractor, extractor_class) @parameterized.parameters( {'use_matmul_crop_and_resize': False}, {'use_matmul_crop_and_resize': True}, ) def test_create_faster_rcnn_resnet101_with_mask_prediction_enabled( self, use_matmul_crop_and_resize): model_text_proto = """ faster_rcnn { num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_resnet101' } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { mask_rcnn_box_predictor { fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } conv_hyperparams { regularizer { 
l2_regularizer { } } initializer { truncated_normal_initializer { } } } predict_instance_masks: true } } second_stage_mask_prediction_loss_weight: 3.0 second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model_proto.faster_rcnn.use_matmul_crop_and_resize = ( use_matmul_crop_and_resize) model = model_builder.build(model_proto, is_training=True) self.assertAlmostEqual(model._second_stage_mask_loss_weight, 3.0) def test_create_faster_rcnn_nas_model_from_config(self): model_text_proto = """ faster_rcnn { num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_nas' } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 17 maxpool_kernel_size: 1 maxpool_stride: 1 second_stage_box_predictor { mask_rcnn_box_predictor { fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) self.assertIsInstance( model._feature_extractor, frcnn_nas.FasterRCNNNASFeatureExtractor) def test_create_faster_rcnn_pnas_model_from_config(self): model_text_proto = """ faster_rcnn { num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_pnas' } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 17 maxpool_kernel_size: 1 maxpool_stride: 1 second_stage_box_predictor { mask_rcnn_box_predictor { fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) self.assertIsInstance( model._feature_extractor, frcnn_pnas.FasterRCNNPNASFeatureExtractor) def test_create_faster_rcnn_inception_resnet_v2_model_from_config(self): model_text_proto = """ faster_rcnn { num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_inception_resnet_v2' } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] 
height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 17 maxpool_kernel_size: 1 maxpool_stride: 1 second_stage_box_predictor { mask_rcnn_box_predictor { fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) self.assertIsInstance( model._feature_extractor, frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor) def test_create_faster_rcnn_inception_v2_model_from_config(self): model_text_proto = """ faster_rcnn { num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_inception_v2' } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { mask_rcnn_box_predictor { fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, faster_rcnn_meta_arch.FasterRCNNMetaArch) self.assertIsInstance(model._feature_extractor, frcnn_inc_v2.FasterRCNNInceptionV2FeatureExtractor) def test_create_faster_rcnn_model_from_config_with_example_miner(self): model_text_proto = """ faster_rcnn { num_classes: 3 feature_extractor { type: 'faster_rcnn_inception_resnet_v2' } image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } second_stage_box_predictor { mask_rcnn_box_predictor { fc_hyperparams { op: FC regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } hard_example_miner { num_hard_examples: 10 iou_threshold: 0.99 } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) model = model_builder.build(model_proto, is_training=True) self.assertIsNotNone(model._hard_example_miner) def test_create_rfcn_resnet_v1_model_from_config(self): model_text_proto = """ faster_rcnn { num_classes: 3 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_resnet101' } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: 
[0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { rfcn_box_predictor { conv_hyperparams { op: CONV regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.01 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } }""" model_proto = model_pb2.DetectionModel() text_format.Merge(model_text_proto, model_proto) for extractor_type, extractor_class in FRCNN_RESNET_FEAT_MAPS.items(): model_proto.faster_rcnn.feature_extractor.type = extractor_type model = model_builder.build(model_proto, is_training=True) self.assertIsInstance(model, rfcn_meta_arch.RFCNMetaArch) self.assertIsInstance(model._feature_extractor, extractor_class) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/model_builder_test.py
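For orientation, every test in the file above reduces to one pattern, which is also how the training binaries construct models: parse a text-format DetectionModel proto and hand it to model_builder.build. A minimal sketch, assuming the object_detection package layout used throughout this record; the wrapper function name is illustrative, not part of the repository:

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import model_builder
from object_detection.protos import model_pb2


def build_detection_model(config_text, is_training):
  # Parse a text-format DetectionModel proto, e.g. one of the `ssd { ... }`
  # or `faster_rcnn { ... }` literals used in the tests above.
  model_proto = model_pb2.DetectionModel()
  text_format.Merge(config_text, model_proto)
  # build() dispatches on the oneof field (ssd vs. faster_rcnn) and returns
  # the corresponding meta-architecture instance.
  return model_builder.build(model_proto, is_training=is_training)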
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for box_coder_builder.""" import tensorflow as tf from google.protobuf import text_format from object_detection.box_coders import faster_rcnn_box_coder from object_detection.box_coders import keypoint_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.box_coders import square_box_coder from object_detection.builders import box_coder_builder from object_detection.protos import box_coder_pb2 class BoxCoderBuilderTest(tf.test.TestCase): def test_build_faster_rcnn_box_coder_with_defaults(self): box_coder_text_proto = """ faster_rcnn_box_coder { } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, faster_rcnn_box_coder.FasterRcnnBoxCoder) self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) def test_build_faster_rcnn_box_coder_with_non_default_parameters(self): box_coder_text_proto = """ faster_rcnn_box_coder { y_scale: 6.0 x_scale: 3.0 height_scale: 7.0 width_scale: 8.0 } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, faster_rcnn_box_coder.FasterRcnnBoxCoder) self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) def test_build_keypoint_box_coder_with_defaults(self): box_coder_text_proto = """ keypoint_box_coder { } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0, 5.0]) def test_build_keypoint_box_coder_with_non_default_parameters(self): box_coder_text_proto = """ keypoint_box_coder { num_keypoints: 6 y_scale: 6.0 x_scale: 3.0 height_scale: 7.0 width_scale: 8.0 } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertIsInstance(box_coder_object, keypoint_box_coder.KeypointBoxCoder) self.assertEqual(box_coder_object._num_keypoints, 6) self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0, 8.0]) def test_build_mean_stddev_box_coder(self): box_coder_text_proto = """ mean_stddev_box_coder { } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertTrue( isinstance(box_coder_object, mean_stddev_box_coder.MeanStddevBoxCoder)) def test_build_square_box_coder_with_defaults(self): box_coder_text_proto = """ square_box_coder { } """ 
box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertTrue( isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) self.assertEqual(box_coder_object._scale_factors, [10.0, 10.0, 5.0]) def test_build_square_box_coder_with_non_default_parameters(self): box_coder_text_proto = """ square_box_coder { y_scale: 6.0 x_scale: 3.0 length_scale: 7.0 } """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) box_coder_object = box_coder_builder.build(box_coder_proto) self.assertTrue( isinstance(box_coder_object, square_box_coder.SquareBoxCoder)) self.assertEqual(box_coder_object._scale_factors, [6.0, 3.0, 7.0]) def test_raise_error_on_empty_box_coder(self): box_coder_text_proto = """ """ box_coder_proto = box_coder_pb2.BoxCoder() text_format.Merge(box_coder_text_proto, box_coder_proto) with self.assertRaises(ValueError): box_coder_builder.build(box_coder_proto) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/box_coder_builder_test.py
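A hedged usage sketch of the builder under test. The BoxList wrapper and the encode call come from the shared BoxCoder interface in object_detection.core, which this test file never imports, so treat them as assumptions rather than asserted behavior:

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import box_coder_builder
from object_detection.core import box_list
from object_detection.protos import box_coder_pb2

box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge('faster_rcnn_box_coder { }', box_coder_proto)
coder = box_coder_builder.build(box_coder_proto)

# Encode one groundtruth box against one anchor; boxes are
# [ymin, xmin, ymax, xmax] in normalized coordinates.
boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.9, 0.9]]))
anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
rel_codes = coder.encode(boxes, anchors)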
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for preprocessing steps."""

import tensorflow as tf

from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2


def _get_step_config_from_proto(preprocessor_step_config, step_name):
  """Returns the value of a field named step_name from proto.

  Args:
    preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object.
    step_name: Name of the field to get value from.

  Returns:
    result_dict: a sub proto message from preprocessor_step_config which will
      later be converted to a dictionary.

  Raises:
    ValueError: If field does not exist in proto.
  """
  for field, value in preprocessor_step_config.ListFields():
    if field.name == step_name:
      return value

  raise ValueError('Could not get field %s from proto!' % step_name)


def _get_dict_from_proto(config):
  """Helper function to put all proto fields into a dictionary.

  For many preprocessing steps, there's a trivial 1-1 mapping from proto
  fields to function arguments. This function automatically populates a
  dictionary with the arguments from the proto.

  Protos that CANNOT be trivially populated include:
  * nested messages.
  * steps that check if an optional field is set (i.e. where None != 0).
  * protos that don't map 1-1 to arguments (i.e. a list should be reshaped).
  * fields requiring additional validation (i.e. a repeated field has n
    elements).

  Args:
    config: A protobuf object that does not violate the conditions above.

  Returns:
    result_dict: |config| converted into a python dictionary.
  """
  result_dict = {}
  for field, value in config.ListFields():
    result_dict[field.name] = value
  return result_dict


# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
PREPROCESSING_FUNCTION_MAP = { 'normalize_image': preprocessor.normalize_image, 'random_pixel_value_scale': preprocessor.random_pixel_value_scale, 'random_image_scale': preprocessor.random_image_scale, 'random_rgb_to_gray': preprocessor.random_rgb_to_gray, 'random_adjust_brightness': preprocessor.random_adjust_brightness, 'random_adjust_contrast': preprocessor.random_adjust_contrast, 'random_adjust_hue': preprocessor.random_adjust_hue, 'random_adjust_saturation': preprocessor.random_adjust_saturation, 'random_distort_color': preprocessor.random_distort_color, 'random_jitter_boxes': preprocessor.random_jitter_boxes, 'random_crop_to_aspect_ratio': preprocessor.random_crop_to_aspect_ratio, 'random_black_patches': preprocessor.random_black_patches, 'rgb_to_gray': preprocessor.rgb_to_gray, 'scale_boxes_to_pixel_coordinates': ( preprocessor.scale_boxes_to_pixel_coordinates), 'subtract_channel_mean': preprocessor.subtract_channel_mean, 'convert_class_logits_to_softmax': preprocessor.convert_class_logits_to_softmax, } # A map to convert from preprocessor_pb2.ResizeImage.Method enum to # tf.image.ResizeMethod. RESIZE_METHOD_MAP = { preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA, preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC, preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR, preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: ( tf.image.ResizeMethod.NEAREST_NEIGHBOR), } def build(preprocessor_step_config): """Builds preprocessing step based on the configuration. Args: preprocessor_step_config: PreprocessingStep configuration proto. Returns: function, argmap: A callable function and an argument map to call function with. Raises: ValueError: On invalid configuration. """ step_type = preprocessor_step_config.WhichOneof('preprocessing_step') if step_type in PREPROCESSING_FUNCTION_MAP: preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type] step_config = _get_step_config_from_proto(preprocessor_step_config, step_type) function_args = _get_dict_from_proto(step_config) return (preprocessing_function, function_args) if step_type == 'random_horizontal_flip': config = preprocessor_step_config.random_horizontal_flip return (preprocessor.random_horizontal_flip, { 'keypoint_flip_permutation': tuple( config.keypoint_flip_permutation), }) if step_type == 'random_vertical_flip': config = preprocessor_step_config.random_vertical_flip return (preprocessor.random_vertical_flip, { 'keypoint_flip_permutation': tuple( config.keypoint_flip_permutation), }) if step_type == 'random_rotation90': return (preprocessor.random_rotation90, {}) if step_type == 'random_crop_image': config = preprocessor_step_config.random_crop_image return (preprocessor.random_crop_image, { 'min_object_covered': config.min_object_covered, 'aspect_ratio_range': (config.min_aspect_ratio, config.max_aspect_ratio), 'area_range': (config.min_area, config.max_area), 'overlap_thresh': config.overlap_thresh, 'clip_boxes': config.clip_boxes, 'random_coef': config.random_coef, }) if step_type == 'random_pad_image': config = preprocessor_step_config.random_pad_image min_image_size = None if (config.HasField('min_image_height') != config.HasField('min_image_width')): raise ValueError('min_image_height and min_image_width should be either ' 'both set or both unset.') if config.HasField('min_image_height'): min_image_size = (config.min_image_height, config.min_image_width) max_image_size = None if (config.HasField('max_image_height') != config.HasField('max_image_width')): raise 
ValueError('max_image_height and max_image_width should be either ' 'both set or both unset.') if config.HasField('max_image_height'): max_image_size = (config.max_image_height, config.max_image_width) pad_color = config.pad_color or None if pad_color: if len(pad_color) == 3: pad_color = tf.to_float([x for x in config.pad_color]) else: raise ValueError('pad_color should have 3 elements (RGB) if set!') return (preprocessor.random_pad_image, { 'min_image_size': min_image_size, 'max_image_size': max_image_size, 'pad_color': pad_color, }) if step_type == 'random_crop_pad_image': config = preprocessor_step_config.random_crop_pad_image min_padded_size_ratio = config.min_padded_size_ratio if min_padded_size_ratio and len(min_padded_size_ratio) != 2: raise ValueError('min_padded_size_ratio should have 2 elements if set!') max_padded_size_ratio = config.max_padded_size_ratio if max_padded_size_ratio and len(max_padded_size_ratio) != 2: raise ValueError('max_padded_size_ratio should have 2 elements if set!') pad_color = config.pad_color if pad_color and len(pad_color) != 3: raise ValueError('pad_color should have 3 elements if set!') kwargs = { 'min_object_covered': config.min_object_covered, 'aspect_ratio_range': (config.min_aspect_ratio, config.max_aspect_ratio), 'area_range': (config.min_area, config.max_area), 'overlap_thresh': config.overlap_thresh, 'clip_boxes': config.clip_boxes, 'random_coef': config.random_coef, } if min_padded_size_ratio: kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) if max_padded_size_ratio: kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) if pad_color: kwargs['pad_color'] = tuple(pad_color) return (preprocessor.random_crop_pad_image, kwargs) if step_type == 'random_resize_method': config = preprocessor_step_config.random_resize_method return (preprocessor.random_resize_method, { 'target_size': [config.target_height, config.target_width], }) if step_type == 'resize_image': config = preprocessor_step_config.resize_image method = RESIZE_METHOD_MAP[config.method] return (preprocessor.resize_image, { 'new_height': config.new_height, 'new_width': config.new_width, 'method': method }) if step_type == 'ssd_random_crop': config = preprocessor_step_config.ssd_random_crop if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop, { 'min_object_covered': min_object_covered, 'aspect_ratio_range': aspect_ratio_range, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, }) return (preprocessor.ssd_random_crop, {}) if step_type == 'ssd_random_crop_pad': config = preprocessor_step_config.ssd_random_crop_pad if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] min_padded_size_ratio = 
[tuple(op.min_padded_size_ratio) for op in config.operations] max_padded_size_ratio = [tuple(op.max_padded_size_ratio) for op in config.operations] pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b) for op in config.operations] return (preprocessor.ssd_random_crop_pad, { 'min_object_covered': min_object_covered, 'aspect_ratio_range': aspect_ratio_range, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, 'min_padded_size_ratio': min_padded_size_ratio, 'max_padded_size_ratio': max_padded_size_ratio, 'pad_color': pad_color, }) return (preprocessor.ssd_random_crop_pad, {}) if step_type == 'ssd_random_crop_fixed_aspect_ratio': config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio if config.operations: min_object_covered = [op.min_object_covered for op in config.operations] area_range = [(op.min_area, op.max_area) for op in config.operations] overlap_thresh = [op.overlap_thresh for op in config.operations] clip_boxes = [op.clip_boxes for op in config.operations] random_coef = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop_fixed_aspect_ratio, { 'min_object_covered': min_object_covered, 'aspect_ratio': config.aspect_ratio, 'area_range': area_range, 'overlap_thresh': overlap_thresh, 'clip_boxes': clip_boxes, 'random_coef': random_coef, }) return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {}) if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio': config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio kwargs = {} aspect_ratio = config.aspect_ratio if aspect_ratio: kwargs['aspect_ratio'] = aspect_ratio min_padded_size_ratio = config.min_padded_size_ratio if min_padded_size_ratio: if len(min_padded_size_ratio) != 2: raise ValueError('min_padded_size_ratio should have 2 elements if set!') kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio) max_padded_size_ratio = config.max_padded_size_ratio if max_padded_size_ratio: if len(max_padded_size_ratio) != 2: raise ValueError('max_padded_size_ratio should have 2 elements if set!') kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio) if config.operations: kwargs['min_object_covered'] = [op.min_object_covered for op in config.operations] kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations] kwargs['area_range'] = [(op.min_area, op.max_area) for op in config.operations] kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations] kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations] kwargs['random_coef'] = [op.random_coef for op in config.operations] return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs) raise ValueError('Unknown preprocessing step.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/preprocessor_builder.py
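Since build() only returns a (function, kwargs) pair and never touches tensors itself, a caller has to apply the pair. A hedged sketch of the common pattern; preprocessor.preprocess, the standard_fields keys, and the rank-4 image convention are assumptions drawn from the core preprocessor module, not from this file:

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.protos import preprocessor_pb2

step_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge('random_horizontal_flip { }', step_proto)

# build() returns the callable and its kwargs without applying anything.
flip_fn, flip_kwargs = preprocessor_builder.build(step_proto)

# preprocessor.preprocess (assumed API) maps a list of such pairs over a
# tensor dictionary; a flip also needs groundtruth boxes so they flip too.
tensor_dict = {
    fields.InputDataFields.image: tf.zeros([1, 320, 320, 3], tf.float32),
    fields.InputDataFields.groundtruth_boxes:
        tf.constant([[0.1, 0.1, 0.9, 0.9]]),
}
tensor_dict = preprocessor.preprocess(tensor_dict, [(flip_fn, flip_kwargs)])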
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function for post processing operations."""
import functools

import tensorflow as tf
from object_detection.core import post_processing
from object_detection.protos import post_processing_pb2


def build(post_processing_config):
  """Builds callables for post-processing operations.

  Builds callables for non-max suppression and score conversion based on the
  configuration.

  Non-max suppression callable takes `boxes`, `scores`, and optionally
  `clip_window`, `parallel_iterations`, `masks`, and `scope` as inputs. It
  returns `nms_boxes`, `nms_scores`, `nms_classes`, `nms_masks` and
  `num_detections`. See post_processing.batch_multiclass_non_max_suppression
  for the type and shape of these tensors.

  Score converter callable should be called with `input` tensor. The callable
  returns the output from one of 3 tf operations based on the configuration -
  tf.identity, tf.sigmoid or tf.nn.softmax. See tensorflow documentation for
  argument and return value descriptions.

  Args:
    post_processing_config: post_processing.proto object containing the
      parameters for the post-processing operations.

  Returns:
    non_max_suppressor_fn: Callable for non-max suppression.
    score_converter_fn: Callable for score conversion.

  Raises:
    ValueError: if the post_processing_config is of incorrect type.
  """
  if not isinstance(post_processing_config,
                    post_processing_pb2.PostProcessing):
    raise ValueError('post_processing_config not of type '
                     'post_processing_pb2.PostProcessing.')
  non_max_suppressor_fn = _build_non_max_suppressor(
      post_processing_config.batch_non_max_suppression)
  score_converter_fn = _build_score_converter(
      post_processing_config.score_converter,
      post_processing_config.logit_scale)
  return non_max_suppressor_fn, score_converter_fn


def _build_non_max_suppressor(nms_config):
  """Builds non-max suppression based on the nms config.

  Args:
    nms_config: post_processing_pb2.PostProcessing.BatchNonMaxSuppression
      proto.

  Returns:
    non_max_suppressor_fn: Callable non-max suppressor.

  Raises:
    ValueError: On incorrect iou_threshold or on incompatible values of
      max_total_detections and max_detections_per_class.
  """
  if nms_config.iou_threshold < 0 or nms_config.iou_threshold > 1.0:
    raise ValueError('iou_threshold not in [0, 1.0].')
  if nms_config.max_detections_per_class > nms_config.max_total_detections:
    raise ValueError('max_detections_per_class should be no greater than '
                     'max_total_detections.')

  non_max_suppressor_fn = functools.partial(
      post_processing.batch_multiclass_non_max_suppression,
      score_thresh=nms_config.score_threshold,
      iou_thresh=nms_config.iou_threshold,
      max_size_per_class=nms_config.max_detections_per_class,
      max_total_size=nms_config.max_total_detections,
      use_static_shapes=nms_config.use_static_shapes)
  return non_max_suppressor_fn


def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale):
  """Create a function to scale logits then apply a TensorFlow function."""
  def score_converter_fn(logits):
    scaled_logits = tf.divide(logits, logit_scale, name='scale_logits')
    return tf_score_converter_fn(scaled_logits, name='convert_scores')
  score_converter_fn.__name__ = '%s_with_logit_scale' % (
      tf_score_converter_fn.__name__)
  return score_converter_fn


def _build_score_converter(score_converter_config, logit_scale):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.nn.softmax] score converters
  based on the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.
    logit_scale: temperature to use for SOFTMAX score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return _score_converter_fn_with_logit_scale(tf.identity, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale)
  raise ValueError('Unknown score converter.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/post_processing_builder.py
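A short sketch of the two callables build() hands back; the config literal is illustrative, and the score converter is an ordinary tensor-in/tensor-out function, so it can be exercised in isolation:

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2

proto = post_processing_pb2.PostProcessing()
text_format.Merge("""
    batch_non_max_suppression {
      score_threshold: 0.5
      iou_threshold: 0.6
      max_detections_per_class: 100
      max_total_detections: 300
    }
    score_converter: SOFTMAX
""", proto)
nms_fn, score_converter_fn = post_processing_builder.build(proto)

# The converter divides by logit_scale (1.0 when unset) and applies softmax
# over the last axis.
class_logits = tf.random_normal([2, 4, 3])
class_scores = score_converter_fn(class_logits)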
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dataset_builder."""
import os
import numpy as np
import tensorflow as tf

from google.protobuf import text_format

from object_detection.builders import dataset_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2
from object_detection.utils import dataset_util


class DatasetBuilderTest(tf.test.TestCase):

  def create_tf_record(self, has_additional_channels=False, num_examples=1):
    path = os.path.join(self.get_temp_dir(), 'tfrecord')
    writer = tf.python_io.TFRecordWriter(path)
    image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
    additional_channels_tensor = np.random.randint(
        255, size=(4, 5, 1)).astype(np.uint8)
    flat_mask = (4 * 5) * [1.0]
    with self.test_session():
      encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
      encoded_additional_channels_jpeg = tf.image.encode_jpeg(
          tf.constant(additional_channels_tensor)).eval()
      for i in range(num_examples):
        features = {
            # Encode the source id so the feature holds bytes under Python 3
            # as well, mirroring the 'jpeg'.encode('utf8') pattern below.
            'image/source_id': dataset_util.bytes_feature(
                str(i).encode('utf8')),
            'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
            'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
            'image/height': dataset_util.int64_feature(4),
            'image/width': dataset_util.int64_feature(5),
            'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
            'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
            'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
            'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
            'image/object/class/label': dataset_util.int64_list_feature([2]),
            'image/object/mask': dataset_util.float_list_feature(flat_mask),
        }
        if has_additional_channels:
          additional_channels_key = 'image/additional_channels/encoded'
          features[additional_channels_key] = dataset_util.bytes_list_feature(
              [encoded_additional_channels_jpeg] * 2)
        example = tf.train.Example(
            features=tf.train.Features(feature=features))
        writer.write(example.SerializeToString())
      writer.close()
    return path

  def test_build_tf_record_input_reader(self):
    tf_record_path = self.create_tf_record()
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_builder.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    with tf.train.MonitoredSession() as sess:
      output_dict = sess.run(tensor_dict)

    self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
                     output_dict)
    self.assertEqual((1, 4, 5, 3),
                     output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual(
        [[2]], output_dict[fields.InputDataFields.groundtruth_classes])
    self.assertEqual(
        (1, 1, 4),
        output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual(
        [0.0, 0.0, 1.0, 1.0],
        output_dict[fields.InputDataFields.groundtruth_boxes][0][0])

  def test_build_tf_record_input_reader_and_load_instance_masks(self):
    tf_record_path = self.create_tf_record()
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_builder.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    with tf.train.MonitoredSession() as sess:
      output_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        (1, 1, 4, 5),
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)

  def test_build_tf_record_input_reader_with_batch_size_two(self):
    tf_record_path = self.create_tf_record()
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)

    def one_hot_class_encoding_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1,
          depth=3)
      return tensor_dict

    tensor_dict = dataset_builder.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2)).get_next()

    with tf.train.MonitoredSession() as sess:
      output_dict = sess.run(tensor_dict)

    self.assertAllEqual([2, 4, 5, 3],
                        output_dict[fields.InputDataFields.image].shape)
    self.assertAllEqual(
        [2, 1, 3],
        output_dict[fields.InputDataFields.groundtruth_classes].shape)
    self.assertAllEqual(
        [2, 1, 4],
        output_dict[fields.InputDataFields.groundtruth_boxes].shape)
    self.assertAllEqual([[[0.0, 0.0, 1.0, 1.0]], [[0.0, 0.0, 1.0, 1.0]]],
                        output_dict[fields.InputDataFields.groundtruth_boxes])

  def test_build_tf_record_input_reader_with_batch_size_two_and_masks(self):
    tf_record_path = self.create_tf_record()
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)

    def one_hot_class_encoding_fn(tensor_dict):
      tensor_dict[fields.InputDataFields.groundtruth_classes] = tf.one_hot(
          tensor_dict[fields.InputDataFields.groundtruth_classes] - 1,
          depth=3)
      return tensor_dict

    tensor_dict = dataset_builder.make_initializable_iterator(
        dataset_builder.build(
            input_reader_proto,
            transform_input_data_fn=one_hot_class_encoding_fn,
            batch_size=2)).get_next()

    with tf.train.MonitoredSession() as sess:
      output_dict = sess.run(tensor_dict)

    self.assertAllEqual(
        [2, 1, 4, 5],
        output_dict[fields.InputDataFields.groundtruth_instance_masks].shape)

  def test_raises_error_with_no_input_paths(self):
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
    """
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    with self.assertRaises(ValueError):
      dataset_builder.build(input_reader_proto, batch_size=1)

  def test_sample_all_data(self):
    tf_record_path = self.create_tf_record(num_examples=2)
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      sample_1_of_n_examples: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_builder.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    with tf.train.MonitoredSession() as sess:
      output_dict = sess.run(tensor_dict)
      self.assertAllEqual(['0'],
                          output_dict[fields.InputDataFields.source_id])
      output_dict = sess.run(tensor_dict)
      self.assertAllEqual(['1'],
                          output_dict[fields.InputDataFields.source_id])

  def test_sample_one_of_n_shards(self):
    tf_record_path = self.create_tf_record(num_examples=4)
    input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      sample_1_of_n_examples: 2
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
    input_reader_proto = input_reader_pb2.InputReader()
    text_format.Merge(input_reader_text_proto, input_reader_proto)
    tensor_dict = dataset_builder.make_initializable_iterator(
        dataset_builder.build(input_reader_proto, batch_size=1)).get_next()

    with tf.train.MonitoredSession() as sess:
      output_dict = sess.run(tensor_dict)
      self.assertAllEqual(['0'],
                          output_dict[fields.InputDataFields.source_id])
      output_dict = sess.run(tensor_dict)
      self.assertAllEqual(['2'],
                          output_dict[fields.InputDataFields.source_id])


class ReadDatasetTest(tf.test.TestCase):

  def setUp(self):
    self._path_template = os.path.join(self.get_temp_dir(),
                                       'examples_%s.txt')
    for i in range(5):
      path = self._path_template % i
      with tf.gfile.Open(path, 'wb') as f:
        f.write('\n'.join([str(i + 1), str((i + 1) * 10)]))

    self._shuffle_path_template = os.path.join(self.get_temp_dir(),
                                               'shuffle_%s.txt')
    for i in range(2):
      path = self._shuffle_path_template % i
      with tf.gfile.Open(path, 'wb') as f:
        f.write('\n'.join([str(i)] * 5))

  def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
      return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
                                           config)
    dataset = dataset.map(decode_func)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()

  def test_make_initializable_iterator_with_hash_table(self):
    keys = [1, 0, -1]
    dataset = tf.data.Dataset.from_tensor_slices([[1, 2, -1, 5]])
    table = tf.contrib.lookup.HashTable(
        initializer=tf.contrib.lookup.KeyValueTensorInitializer(
            keys=keys, values=list(reversed(keys))),
        default_value=100)
    dataset = dataset.map(table.lookup)
    data = dataset_builder.make_initializable_iterator(dataset).get_next()
    init = tf.tables_initializer()
    with self.test_session() as sess:
      sess.run(init)
      self.assertAllEqual(sess.run(data), [-1, 100, 1, 100])

  def test_read_dataset(self):
    config = input_reader_pb2.InputReader()
    config.num_readers = 1
    config.shuffle = False

    data = self._get_dataset_next(
        [self._path_template % '*'], config, batch_size=20)
    with self.test_session() as sess:
      self.assertAllEqual(
          sess.run(data), [[
              1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40,
              5, 50
          ]])

  def test_reduce_num_reader(self):
    config = input_reader_pb2.InputReader()
    config.num_readers = 10
    config.shuffle = False

    data = self._get_dataset_next(
        [self._path_template % '*'], config, batch_size=20)
    with self.test_session() as sess:
      self.assertAllEqual(
          sess.run(data), [[
              1, 10, 2, 20, 3, 30, 4, 40, 5, 50, 1, 10, 2, 20, 3, 30, 4, 40,
              5, 50
          ]])

  def test_enable_shuffle(self):
    config = input_reader_pb2.InputReader()
    config.num_readers = 1
    config.shuffle = True

    tf.set_random_seed(1)  # Set graph level seed.
    data = self._get_dataset_next(
        [self._shuffle_path_template % '*'], config, batch_size=10)
    expected_non_shuffle_output = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    with self.test_session() as sess:
      self.assertTrue(
          np.any(np.not_equal(sess.run(data), expected_non_shuffle_output)))

  def test_disable_shuffle(self):
    config = input_reader_pb2.InputReader()
    config.num_readers = 1
    config.shuffle = False

    data = self._get_dataset_next(
        [self._shuffle_path_template % '*'], config, batch_size=10)
    expected_non_shuffle_output = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    with self.test_session() as sess:
      self.assertAllEqual(sess.run(data), [expected_non_shuffle_output])

  def test_read_dataset_single_epoch(self):
    config = input_reader_pb2.InputReader()
    config.num_epochs = 1
    config.num_readers = 1
    config.shuffle = False

    data = self._get_dataset_next(
        [self._path_template % '0'], config, batch_size=30)
    with self.test_session() as sess:
      # First batch will retrieve as much as it can, second batch will fail.
      self.assertAllEqual(sess.run(data), [[1, 10]])
      self.assertRaises(tf.errors.OutOfRangeError, sess.run, data)


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/dataset_builder_test.py
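Outside of tests, the same three calls build a real input pipeline. A minimal sketch; the record path below is a hypothetical placeholder, and the proto fields follow the configs exercised above:

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import dataset_builder
from object_detection.protos import input_reader_pb2

config = input_reader_pb2.InputReader()
text_format.Merge("""
    shuffle: false
    num_readers: 1
    tf_record_input_reader { input_path: '/tmp/train.record' }
""", config)  # '/tmp/train.record' is a hypothetical path.

# build returns a batched tf.data.Dataset of tensor dictionaries; the
# initializable-iterator pattern matches what the tests above use.
dataset = dataset_builder.build(config, batch_size=8)
tensors = dataset_builder.make_initializable_iterator(dataset).get_next()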
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for matcher_builder.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import matcher_builder from object_detection.matchers import argmax_matcher from object_detection.matchers import bipartite_matcher from object_detection.protos import matcher_pb2 class MatcherBuilderTest(tf.test.TestCase): def test_build_arg_max_matcher_with_defaults(self): matcher_text_proto = """ argmax_matcher { } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher)) self.assertAlmostEqual(matcher_object._matched_threshold, 0.5) self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5) self.assertTrue(matcher_object._negatives_lower_than_unmatched) self.assertFalse(matcher_object._force_match_for_each_row) def test_build_arg_max_matcher_without_thresholds(self): matcher_text_proto = """ argmax_matcher { ignore_thresholds: true } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher)) self.assertEqual(matcher_object._matched_threshold, None) self.assertEqual(matcher_object._unmatched_threshold, None) self.assertTrue(matcher_object._negatives_lower_than_unmatched) self.assertFalse(matcher_object._force_match_for_each_row) def test_build_arg_max_matcher_with_non_default_parameters(self): matcher_text_proto = """ argmax_matcher { matched_threshold: 0.7 unmatched_threshold: 0.3 negatives_lower_than_unmatched: false force_match_for_each_row: true use_matmul_gather: true } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher)) self.assertAlmostEqual(matcher_object._matched_threshold, 0.7) self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3) self.assertFalse(matcher_object._negatives_lower_than_unmatched) self.assertTrue(matcher_object._force_match_for_each_row) self.assertTrue(matcher_object._use_matmul_gather) def test_build_bipartite_matcher(self): matcher_text_proto = """ bipartite_matcher { } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue( isinstance(matcher_object, bipartite_matcher.GreedyBipartiteMatcher)) def test_raise_error_on_empty_matcher(self): matcher_text_proto = """ """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) with self.assertRaises(ValueError): matcher_builder.build(matcher_proto) if __name__ == 
'__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/matcher_builder_test.py
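A usage sketch for the built matcher. The match() call and the Match result type come from the core matcher interface, which this test file never touches, so they are assumptions rather than asserted behavior:

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import matcher_builder
from object_detection.protos import matcher_pb2

matcher_proto = matcher_pb2.Matcher()
text_format.Merge("""
    argmax_matcher {
      matched_threshold: 0.7
      unmatched_threshold: 0.3
    }
""", matcher_proto)
matcher = matcher_builder.build(matcher_proto)

# Rows play the role of groundtruth boxes and columns the role of anchors;
# values stand in for IOU similarities. match() (assumed interface) returns
# a Match object describing the assignment.
similarity = tf.constant([[0.9, 0.1]])
match = matcher.match(similarity)
matched_anchor_indices = match.matched_column_indices()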
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessor_builder."""

import tensorflow as tf

from google.protobuf import text_format

from object_detection.builders import preprocessor_builder
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2


class PreprocessorBuilderTest(tf.test.TestCase):

  def assert_dictionary_close(self, dict1, dict2):
    """Helper to check if two dicts with floats or integers are close."""
    self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
    for key in dict1:
      value = dict1[key]
      if isinstance(value, float):
        self.assertAlmostEqual(value, dict2[key])
      else:
        self.assertEqual(value, dict2[key])

  def test_build_normalize_image(self):
    preprocessor_text_proto = """
    normalize_image {
      original_minval: 0.0
      original_maxval: 255.0
      target_minval: -1.0
      target_maxval: 1.0
    }
    """
    preprocessor_proto = preprocessor_pb2.PreprocessingStep()
    text_format.Merge(preprocessor_text_proto, preprocessor_proto)
    function, args = preprocessor_builder.build(preprocessor_proto)
    self.assertEqual(function, preprocessor.normalize_image)
    self.assertEqual(args, {
        'original_minval': 0.0,
        'original_maxval': 255.0,
        'target_minval': -1.0,
        'target_maxval': 1.0,
    })

  def test_build_random_horizontal_flip(self):
    preprocessor_text_proto = """
    random_horizontal_flip {
      keypoint_flip_permutation: 1
      keypoint_flip_permutation: 0
      keypoint_flip_permutation: 2
      keypoint_flip_permutation: 3
      keypoint_flip_permutation: 5
      keypoint_flip_permutation: 4
    }
    """
    preprocessor_proto = preprocessor_pb2.PreprocessingStep()
    text_format.Merge(preprocessor_text_proto, preprocessor_proto)
    function, args = preprocessor_builder.build(preprocessor_proto)
    self.assertEqual(function, preprocessor.random_horizontal_flip)
    self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4)})

  def test_build_random_vertical_flip(self):
    preprocessor_text_proto = """
    random_vertical_flip {
      keypoint_flip_permutation: 1
      keypoint_flip_permutation: 0
      keypoint_flip_permutation: 2
      keypoint_flip_permutation: 3
      keypoint_flip_permutation: 5
      keypoint_flip_permutation: 4
    }
    """
    preprocessor_proto = preprocessor_pb2.PreprocessingStep()
    text_format.Merge(preprocessor_text_proto, preprocessor_proto)
    function, args = preprocessor_builder.build(preprocessor_proto)
    self.assertEqual(function, preprocessor.random_vertical_flip)
    self.assertEqual(args, {'keypoint_flip_permutation': (1, 0, 2, 3, 5, 4)})

  def test_build_random_rotation90(self):
    preprocessor_text_proto = """
    random_rotation90 {}
    """
    preprocessor_proto = preprocessor_pb2.PreprocessingStep()
    text_format.Merge(preprocessor_text_proto, preprocessor_proto)
    function, args = preprocessor_builder.build(preprocessor_proto)
    self.assertEqual(function, preprocessor.random_rotation90)
    self.assertEqual(args, {})

  def test_build_random_pixel_value_scale(self):
    preprocessor_text_proto = """
    random_pixel_value_scale {
      minval: 0.8
maxval: 1.2 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_pixel_value_scale) self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2}) def test_build_random_image_scale(self): preprocessor_text_proto = """ random_image_scale { min_scale_ratio: 0.8 max_scale_ratio: 2.2 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_image_scale) self.assert_dictionary_close(args, {'min_scale_ratio': 0.8, 'max_scale_ratio': 2.2}) def test_build_random_rgb_to_gray(self): preprocessor_text_proto = """ random_rgb_to_gray { probability: 0.8 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_rgb_to_gray) self.assert_dictionary_close(args, {'probability': 0.8}) def test_build_random_adjust_brightness(self): preprocessor_text_proto = """ random_adjust_brightness { max_delta: 0.2 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_adjust_brightness) self.assert_dictionary_close(args, {'max_delta': 0.2}) def test_build_random_adjust_contrast(self): preprocessor_text_proto = """ random_adjust_contrast { min_delta: 0.7 max_delta: 1.1 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_adjust_contrast) self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1}) def test_build_random_adjust_hue(self): preprocessor_text_proto = """ random_adjust_hue { max_delta: 0.01 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_adjust_hue) self.assert_dictionary_close(args, {'max_delta': 0.01}) def test_build_random_adjust_saturation(self): preprocessor_text_proto = """ random_adjust_saturation { min_delta: 0.75 max_delta: 1.15 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_adjust_saturation) self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15}) def test_build_random_distort_color(self): preprocessor_text_proto = """ random_distort_color { color_ordering: 1 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_distort_color) self.assertEqual(args, {'color_ordering': 1}) def test_build_random_jitter_boxes(self): preprocessor_text_proto = """ random_jitter_boxes { ratio: 0.1 } """ preprocessor_proto = 
preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_jitter_boxes) self.assert_dictionary_close(args, {'ratio': 0.1}) def test_build_random_crop_image(self): preprocessor_text_proto = """ random_crop_image { min_object_covered: 0.75 min_aspect_ratio: 0.75 max_aspect_ratio: 1.5 min_area: 0.25 max_area: 0.875 overlap_thresh: 0.5 clip_boxes: False random_coef: 0.125 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_crop_image) self.assertEqual(args, { 'min_object_covered': 0.75, 'aspect_ratio_range': (0.75, 1.5), 'area_range': (0.25, 0.875), 'overlap_thresh': 0.5, 'clip_boxes': False, 'random_coef': 0.125, }) def test_build_random_pad_image(self): preprocessor_text_proto = """ random_pad_image { } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_pad_image) self.assertEqual(args, { 'min_image_size': None, 'max_image_size': None, 'pad_color': None, }) def test_build_random_crop_pad_image(self): preprocessor_text_proto = """ random_crop_pad_image { min_object_covered: 0.75 min_aspect_ratio: 0.75 max_aspect_ratio: 1.5 min_area: 0.25 max_area: 0.875 overlap_thresh: 0.5 clip_boxes: False random_coef: 0.125 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_crop_pad_image) self.assertEqual(args, { 'min_object_covered': 0.75, 'aspect_ratio_range': (0.75, 1.5), 'area_range': (0.25, 0.875), 'overlap_thresh': 0.5, 'clip_boxes': False, 'random_coef': 0.125, }) def test_build_random_crop_pad_image_with_optional_parameters(self): preprocessor_text_proto = """ random_crop_pad_image { min_object_covered: 0.75 min_aspect_ratio: 0.75 max_aspect_ratio: 1.5 min_area: 0.25 max_area: 0.875 overlap_thresh: 0.5 clip_boxes: False random_coef: 0.125 min_padded_size_ratio: 0.5 min_padded_size_ratio: 0.75 max_padded_size_ratio: 0.5 max_padded_size_ratio: 0.75 pad_color: 0.5 pad_color: 0.5 pad_color: 1.0 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_crop_pad_image) self.assertEqual(args, { 'min_object_covered': 0.75, 'aspect_ratio_range': (0.75, 1.5), 'area_range': (0.25, 0.875), 'overlap_thresh': 0.5, 'clip_boxes': False, 'random_coef': 0.125, 'min_padded_size_ratio': (0.5, 0.75), 'max_padded_size_ratio': (0.5, 0.75), 'pad_color': (0.5, 0.5, 1.0) }) def test_build_random_crop_to_aspect_ratio(self): preprocessor_text_proto = """ random_crop_to_aspect_ratio { aspect_ratio: 0.85 overlap_thresh: 0.35 clip_boxes: False } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio) self.assert_dictionary_close(args, {'aspect_ratio': 0.85, 
'overlap_thresh': 0.35, 'clip_boxes': False}) def test_build_random_black_patches(self): preprocessor_text_proto = """ random_black_patches { max_black_patches: 20 probability: 0.95 size_to_image_ratio: 0.12 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_black_patches) self.assert_dictionary_close(args, {'max_black_patches': 20, 'probability': 0.95, 'size_to_image_ratio': 0.12}) def test_build_random_resize_method(self): preprocessor_text_proto = """ random_resize_method { target_height: 75 target_width: 100 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.random_resize_method) self.assert_dictionary_close(args, {'target_size': [75, 100]}) def test_build_scale_boxes_to_pixel_coordinates(self): preprocessor_text_proto = """ scale_boxes_to_pixel_coordinates {} """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates) self.assertEqual(args, {}) def test_build_resize_image(self): preprocessor_text_proto = """ resize_image { new_height: 75 new_width: 100 method: BICUBIC } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.resize_image) self.assertEqual(args, {'new_height': 75, 'new_width': 100, 'method': tf.image.ResizeMethod.BICUBIC}) def test_build_rgb_to_gray(self): preprocessor_text_proto = """ rgb_to_gray {} """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.rgb_to_gray) self.assertEqual(args, {}) def test_build_subtract_channel_mean(self): preprocessor_text_proto = """ subtract_channel_mean { means: [1.0, 2.0, 3.0] } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.subtract_channel_mean) self.assertEqual(args, {'means': [1.0, 2.0, 3.0]}) def test_build_ssd_random_crop(self): preprocessor_text_proto = """ ssd_random_crop { operations { min_object_covered: 0.0 min_aspect_ratio: 0.875 max_aspect_ratio: 1.125 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.0 clip_boxes: False random_coef: 0.375 } operations { min_object_covered: 0.25 min_aspect_ratio: 0.75 max_aspect_ratio: 1.5 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.25 clip_boxes: True random_coef: 0.375 } } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.ssd_random_crop) self.assertEqual(args, {'min_object_covered': [0.0, 0.25], 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], 'area_range': [(0.5, 1.0), (0.5, 1.0)], 'overlap_thresh': [0.0, 0.25], 'clip_boxes': 
[False, True], 'random_coef': [0.375, 0.375]}) def test_build_ssd_random_crop_empty_operations(self): preprocessor_text_proto = """ ssd_random_crop { } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.ssd_random_crop) self.assertEqual(args, {}) def test_build_ssd_random_crop_pad(self): preprocessor_text_proto = """ ssd_random_crop_pad { operations { min_object_covered: 0.0 min_aspect_ratio: 0.875 max_aspect_ratio: 1.125 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.0 clip_boxes: False random_coef: 0.375 min_padded_size_ratio: [1.0, 1.0] max_padded_size_ratio: [2.0, 2.0] pad_color_r: 0.5 pad_color_g: 0.5 pad_color_b: 0.5 } operations { min_object_covered: 0.25 min_aspect_ratio: 0.75 max_aspect_ratio: 1.5 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.25 clip_boxes: True random_coef: 0.375 min_padded_size_ratio: [1.0, 1.0] max_padded_size_ratio: [2.0, 2.0] pad_color_r: 0.5 pad_color_g: 0.5 pad_color_b: 0.5 } } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.ssd_random_crop_pad) self.assertEqual(args, {'min_object_covered': [0.0, 0.25], 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], 'area_range': [(0.5, 1.0), (0.5, 1.0)], 'overlap_thresh': [0.0, 0.25], 'clip_boxes': [False, True], 'random_coef': [0.375, 0.375], 'min_padded_size_ratio': [(1.0, 1.0), (1.0, 1.0)], 'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)], 'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]}) def test_build_ssd_random_crop_fixed_aspect_ratio(self): preprocessor_text_proto = """ ssd_random_crop_fixed_aspect_ratio { operations { min_object_covered: 0.0 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.0 clip_boxes: False random_coef: 0.375 } operations { min_object_covered: 0.25 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.25 clip_boxes: True random_coef: 0.375 } aspect_ratio: 0.875 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio) self.assertEqual(args, {'min_object_covered': [0.0, 0.25], 'aspect_ratio': 0.875, 'area_range': [(0.5, 1.0), (0.5, 1.0)], 'overlap_thresh': [0.0, 0.25], 'clip_boxes': [False, True], 'random_coef': [0.375, 0.375]}) def test_build_ssd_random_crop_pad_fixed_aspect_ratio(self): preprocessor_text_proto = """ ssd_random_crop_pad_fixed_aspect_ratio { operations { min_object_covered: 0.0 min_aspect_ratio: 0.875 max_aspect_ratio: 1.125 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.0 clip_boxes: False random_coef: 0.375 } operations { min_object_covered: 0.25 min_aspect_ratio: 0.75 max_aspect_ratio: 1.5 min_area: 0.5 max_area: 1.0 overlap_thresh: 0.25 clip_boxes: True random_coef: 0.375 } aspect_ratio: 0.875 min_padded_size_ratio: [1.0, 1.0] max_padded_size_ratio: [2.0, 2.0] } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.ssd_random_crop_pad_fixed_aspect_ratio) self.assertEqual(args, {'min_object_covered': [0.0, 0.25], 'aspect_ratio': 0.875, 
'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)], 'area_range': [(0.5, 1.0), (0.5, 1.0)], 'overlap_thresh': [0.0, 0.25], 'clip_boxes': [False, True], 'random_coef': [0.375, 0.375], 'min_padded_size_ratio': (1.0, 1.0), 'max_padded_size_ratio': (2.0, 2.0)}) def test_build_convert_class_logits_to_softmax(self): preprocessor_text_proto = """ convert_class_logits_to_softmax { temperature: 2 } """ preprocessor_proto = preprocessor_pb2.PreprocessingStep() text_format.Merge(preprocessor_text_proto, preprocessor_proto) function, args = preprocessor_builder.build(preprocessor_proto) self.assertEqual(function, preprocessor.convert_class_logits_to_softmax) self.assertEqual(args, {'temperature': 2}) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/preprocessor_builder_test.py
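A minimal usage sketch for the builder exercised by the test file above (my own illustration, not part of the repository dump). It mirrors the Merge-then-build pattern the tests use, assuming the object_detection package and its compiled protos are importable:

from google.protobuf import text_format
from object_detection.builders import preprocessor_builder
from object_detection.protos import preprocessor_pb2

# Parse a preprocessing step from its text-proto form, exactly as the
# tests above do.
preprocessor_text_proto = """
random_horizontal_flip {
}
"""
preprocessor_proto = preprocessor_pb2.PreprocessingStep()
text_format.Merge(preprocessor_text_proto, preprocessor_proto)

# build() returns the preprocessing function and the keyword-argument dict
# to call it with (here: preprocessor.random_horizontal_flip and {}).
function, args = preprocessor_builder.build(preprocessor_proto)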
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for region_similarity_calculator_builder.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import region_similarity_calculator_builder from object_detection.core import region_similarity_calculator from object_detection.protos import region_similarity_calculator_pb2 as sim_calc_pb2 class RegionSimilarityCalculatorBuilderTest(tf.test.TestCase): def testBuildIoaSimilarityCalculator(self): similarity_calc_text_proto = """ ioa_similarity { } """ similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) similarity_calc = region_similarity_calculator_builder.build( similarity_calc_proto) self.assertTrue(isinstance(similarity_calc, region_similarity_calculator.IoaSimilarity)) def testBuildIouSimilarityCalculator(self): similarity_calc_text_proto = """ iou_similarity { } """ similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) similarity_calc = region_similarity_calculator_builder.build( similarity_calc_proto) self.assertTrue(isinstance(similarity_calc, region_similarity_calculator.IouSimilarity)) def testBuildNegSqDistSimilarityCalculator(self): similarity_calc_text_proto = """ neg_sq_dist_similarity { } """ similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator() text_format.Merge(similarity_calc_text_proto, similarity_calc_proto) similarity_calc = region_similarity_calculator_builder.build( similarity_calc_proto) self.assertTrue(isinstance(similarity_calc, region_similarity_calculator. NegSqDistSimilarity)) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/region_similarity_calculator_builder_test.py
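A short sketch of how a built similarity calculator is typically consumed (my own illustration; it assumes the BoxList type and the calculator's compare() method from object_detection.core behave as in the library):

import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import region_similarity_calculator_builder
from object_detection.core import box_list
from object_detection.protos import region_similarity_calculator_pb2 as sim_calc_pb2

similarity_calc_proto = sim_calc_pb2.RegionSimilarityCalculator()
text_format.Merge('iou_similarity { }', similarity_calc_proto)
similarity_calc = region_similarity_calculator_builder.build(
    similarity_calc_proto)

# Pairwise IoU between a unit box and a quarter-size box: expect 0.25.
boxes1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
boxes2 = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5]]))
iou_matrix = similarity_calc.compare(boxes1, boxes2)  # shape [1, 1]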
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for post_processing_builder.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import post_processing_builder from object_detection.protos import post_processing_pb2 class PostProcessingBuilderTest(tf.test.TestCase): def test_build_non_max_suppressor_with_correct_parameters(self): post_processing_text_proto = """ batch_non_max_suppression { score_threshold: 0.7 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } """ post_processing_config = post_processing_pb2.PostProcessing() text_format.Merge(post_processing_text_proto, post_processing_config) non_max_suppressor, _ = post_processing_builder.build( post_processing_config) self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100) self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300) self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7) self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6) def test_build_identity_score_converter(self): post_processing_text_proto = """ score_converter: IDENTITY """ post_processing_config = post_processing_pb2.PostProcessing() text_format.Merge(post_processing_text_proto, post_processing_config) _, score_converter = post_processing_builder.build(post_processing_config) self.assertEqual(score_converter.__name__, 'identity_with_logit_scale') inputs = tf.constant([1, 1], tf.float32) outputs = score_converter(inputs) with self.test_session() as sess: converted_scores = sess.run(outputs) expected_converted_scores = sess.run(inputs) self.assertAllClose(converted_scores, expected_converted_scores) def test_build_identity_score_converter_with_logit_scale(self): post_processing_text_proto = """ score_converter: IDENTITY logit_scale: 2.0 """ post_processing_config = post_processing_pb2.PostProcessing() text_format.Merge(post_processing_text_proto, post_processing_config) _, score_converter = post_processing_builder.build(post_processing_config) self.assertEqual(score_converter.__name__, 'identity_with_logit_scale') inputs = tf.constant([1, 1], tf.float32) outputs = score_converter(inputs) with self.test_session() as sess: converted_scores = sess.run(outputs) expected_converted_scores = sess.run(tf.constant([.5, .5], tf.float32)) self.assertAllClose(converted_scores, expected_converted_scores) def test_build_sigmoid_score_converter(self): post_processing_text_proto = """ score_converter: SIGMOID """ post_processing_config = post_processing_pb2.PostProcessing() text_format.Merge(post_processing_text_proto, post_processing_config) _, score_converter = post_processing_builder.build(post_processing_config) self.assertEqual(score_converter.__name__, 'sigmoid_with_logit_scale') def test_build_softmax_score_converter(self): post_processing_text_proto = """ score_converter: SOFTMAX """ post_processing_config = 
post_processing_pb2.PostProcessing() text_format.Merge(post_processing_text_proto, post_processing_config) _, score_converter = post_processing_builder.build(post_processing_config) self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale') def test_build_softmax_score_converter_with_temperature(self): post_processing_text_proto = """ score_converter: SOFTMAX logit_scale: 2.0 """ post_processing_config = post_processing_pb2.PostProcessing() text_format.Merge(post_processing_text_proto, post_processing_config) _, score_converter = post_processing_builder.build(post_processing_config) self.assertEqual(score_converter.__name__, 'softmax_with_logit_scale') if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/post_processing_builder_test.py
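A minimal sketch of the builder under test (my own illustration): build() returns a pair of callables, a partially bound non-max-suppression function and a score converter, following the exact pattern the tests above assert on:

import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2

post_processing_text_proto = """
batch_non_max_suppression {
  score_threshold: 0.5
  iou_threshold: 0.6
  max_detections_per_class: 100
  max_total_detections: 300
}
score_converter: SIGMOID
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)

# build() returns the (partially bound) NMS function and the score converter.
non_max_suppressor, score_converter = post_processing_builder.build(
    post_processing_config)
scores = score_converter(tf.constant([[2.0, -2.0]]))  # sigmoid of the logits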
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builder function to construct tf-slim arg_scope for convolution, fc ops.""" import tensorflow as tf from object_detection.core import freezable_batch_norm from object_detection.protos import hyperparams_pb2 from object_detection.utils import context_manager slim = tf.contrib.slim class KerasLayerHyperparams(object): """ A hyperparameter configuration object for Keras layers used in Object Detection models. """ def __init__(self, hyperparams_config): """Builds keras hyperparameter config for layers based on the proto config. It automatically converts from Slim layer hyperparameter configs to Keras layer hyperparameters. Namely, it: - Builds Keras initializers/regularizers instead of Slim ones - sets weights_regularizer/initializer to kernel_regularizer/initializer - converts batchnorm decay to momentum - converts Slim l2 regularizer weights to the equivalent Keras l2 weights Contains a hyperparameter configuration for ops that specifies kernel initializer, kernel regularizer, activation. Also contains parameters for batch norm operators based on the configuration. Note that if the batch_norm parameters are not specified in the config (i.e. left to default) then batch norm is excluded from the config. Args: hyperparams_config: hyperparams.proto object containing hyperparameters. Raises: ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. """ if not isinstance(hyperparams_config, hyperparams_pb2.Hyperparams): raise ValueError('hyperparams_config not of type ' 'hyperparams_pb.Hyperparams.') self._batch_norm_params = None if hyperparams_config.HasField('batch_norm'): self._batch_norm_params = _build_keras_batch_norm_params( hyperparams_config.batch_norm) self._activation_fn = _build_activation_fn(hyperparams_config.activation) # TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv # (Those might use depthwise_* instead of kernel_*) # We should probably switch to using build_conv2d_layer and # build_depthwise_conv2d_layer methods instead. self._op_params = { 'kernel_regularizer': _build_keras_regularizer( hyperparams_config.regularizer), 'kernel_initializer': _build_initializer( hyperparams_config.initializer, build_for_keras=True), 'activation': _build_activation_fn(hyperparams_config.activation) } def use_batch_norm(self): return self._batch_norm_params is not None def batch_norm_params(self, **overrides): """Returns a dict containing batchnorm layer construction hyperparameters. Optionally overrides values in the batchnorm hyperparam dict. Overrides only apply to individual calls of this method, and do not affect future calls. Args: **overrides: keyword arguments to override in the hyperparams dictionary Returns: dict containing the layer construction keyword arguments, with values overridden by the `overrides` keyword arguments. 
""" if self._batch_norm_params is None: new_batch_norm_params = dict() else: new_batch_norm_params = self._batch_norm_params.copy() new_batch_norm_params.update(overrides) return new_batch_norm_params def build_batch_norm(self, training=None, **overrides): """Returns a Batch Normalization layer with the appropriate hyperparams. If the hyperparams are configured to not use batch normalization, this will return a Keras Lambda layer that only applies tf.Identity, without doing any normalization. Optionally overrides values in the batch_norm hyperparam dict. Overrides only apply to individual calls of this method, and do not affect future calls. Args: training: if True, the normalization layer will normalize using the batch statistics. If False, the normalization layer will be frozen and will act as if it is being used for inference. If None, the layer will look up the Keras learning phase at `call` time to decide what to do. **overrides: batch normalization construction args to override from the batch_norm hyperparams dictionary. Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True), or a Keras Lambda layer that applies the identity (if use_batch_norm() is False) """ if self.use_batch_norm(): return freezable_batch_norm.FreezableBatchNorm( training=training, **self.batch_norm_params(**overrides) ) else: return tf.keras.layers.Lambda(tf.identity) def build_activation_layer(self, name='activation'): """Returns a Keras layer that applies the desired activation function. Args: name: The name to assign the Keras layer. Returns: A Keras lambda layer that applies the activation function specified in the hyperparam config, or applies the identity if the activation function is None. """ if self._activation_fn: return tf.keras.layers.Lambda(self._activation_fn, name=name) else: return tf.keras.layers.Lambda(tf.identity, name=name) def params(self, include_activation=False, **overrides): """Returns a dict containing the layer construction hyperparameters to use. Optionally overrides values in the returned dict. Overrides only apply to individual calls of this method, and do not affect future calls. Args: include_activation: If False, activation in the returned dictionary will be set to `None`, and the activation must be applied via a separate layer created by `build_activation_layer`. If True, `activation` in the output param dictionary will be set to the activation function specified in the hyperparams config. **overrides: keyword arguments to override in the hyperparams dictionary. Returns: dict containing the layer construction keyword arguments, with values overridden by the `overrides` keyword arguments. """ new_params = self._op_params.copy() new_params['activation'] = None if include_activation: new_params['activation'] = self._activation_fn if self.use_batch_norm() and self.batch_norm_params()['center']: new_params['use_bias'] = False else: new_params['use_bias'] = True new_params.update(**overrides) return new_params def build(hyperparams_config, is_training): """Builds tf-slim arg_scope for convolution ops based on the config. Returns an arg_scope to use for convolution ops containing weights initializer, weights regularizer, activation function, batch norm function and batch norm parameters based on the configuration. Note that if no normalization parameters are specified in the config, (i.e. left to default) then both batch norm and group norm are excluded from the arg_scope. 
The batch norm parameters are set for updates based on `is_training` argument and conv_hyperparams_config.batch_norm.train parameter. During training, they are updated only if batch_norm.train parameter is true. However, during eval, no updates are made to the batch norm variables. In both cases, their current values are used during forward pass. Args: hyperparams_config: hyperparams.proto object containing hyperparameters. is_training: Whether the network is in training mode. Returns: arg_scope_fn: A function to construct tf-slim arg_scope containing hyperparameters for ops. Raises: ValueError: if hyperparams_config is not of type hyperparams.Hyperparams. """ if not isinstance(hyperparams_config, hyperparams_pb2.Hyperparams): raise ValueError('hyperparams_config not of type ' 'hyperparams_pb.Hyperparams.') normalizer_fn = None batch_norm_params = None if hyperparams_config.HasField('batch_norm'): normalizer_fn = slim.batch_norm batch_norm_params = _build_batch_norm_params( hyperparams_config.batch_norm, is_training) if hyperparams_config.HasField('group_norm'): normalizer_fn = tf.contrib.layers.group_norm affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose] if hyperparams_config.HasField('op') and ( hyperparams_config.op == hyperparams_pb2.Hyperparams.FC): affected_ops = [slim.fully_connected] def scope_fn(): with (slim.arg_scope([slim.batch_norm], **batch_norm_params) if batch_norm_params is not None else context_manager.IdentityContextManager()): with slim.arg_scope( affected_ops, weights_regularizer=_build_slim_regularizer( hyperparams_config.regularizer), weights_initializer=_build_initializer( hyperparams_config.initializer), activation_fn=_build_activation_fn(hyperparams_config.activation), normalizer_fn=normalizer_fn) as sc: return sc return scope_fn def _build_activation_fn(activation_fn): """Builds a callable activation from config. Args: activation_fn: hyperparams_pb2.Hyperparams.activation Returns: Callable activation function. Raises: ValueError: On unknown activation function. """ if activation_fn == hyperparams_pb2.Hyperparams.NONE: return None if activation_fn == hyperparams_pb2.Hyperparams.RELU: return tf.nn.relu if activation_fn == hyperparams_pb2.Hyperparams.RELU_6: return tf.nn.relu6 raise ValueError('Unknown activation function: {}'.format(activation_fn)) def _build_slim_regularizer(regularizer): """Builds a tf-slim regularizer from config. Args: regularizer: hyperparams_pb2.Hyperparams.regularizer proto. Returns: tf-slim regularizer. Raises: ValueError: On unknown regularizer. """ regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') if regularizer_oneof == 'l1_regularizer': return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight)) if regularizer_oneof == 'l2_regularizer': return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight)) if regularizer_oneof is None: return None raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) def _build_keras_regularizer(regularizer): """Builds a keras regularizer from config. Args: regularizer: hyperparams_pb2.Hyperparams.regularizer proto. Returns: Keras regularizer. Raises: ValueError: On unknown regularizer. 
""" regularizer_oneof = regularizer.WhichOneof('regularizer_oneof') if regularizer_oneof == 'l1_regularizer': return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight)) if regularizer_oneof == 'l2_regularizer': # The Keras L2 regularizer weight differs from the Slim L2 regularizer # weight by a factor of 2 return tf.keras.regularizers.l2( float(regularizer.l2_regularizer.weight * 0.5)) raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) def _build_initializer(initializer, build_for_keras=False): """Build a tf initializer from config. Args: initializer: hyperparams_pb2.Hyperparams.regularizer proto. build_for_keras: Whether the initializers should be built for Keras operators. If false builds for Slim. Returns: tf initializer. Raises: ValueError: On unknown initializer. """ initializer_oneof = initializer.WhichOneof('initializer_oneof') if initializer_oneof == 'truncated_normal_initializer': return tf.truncated_normal_initializer( mean=initializer.truncated_normal_initializer.mean, stddev=initializer.truncated_normal_initializer.stddev) if initializer_oneof == 'random_normal_initializer': return tf.random_normal_initializer( mean=initializer.random_normal_initializer.mean, stddev=initializer.random_normal_initializer.stddev) if initializer_oneof == 'variance_scaling_initializer': enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer. DESCRIPTOR.enum_types_by_name['Mode']) mode = enum_descriptor.values_by_number[initializer. variance_scaling_initializer. mode].name if build_for_keras: if initializer.variance_scaling_initializer.uniform: return tf.variance_scaling_initializer( scale=initializer.variance_scaling_initializer.factor, mode=mode.lower(), distribution='uniform') else: # In TF 1.9 release and earlier, the truncated_normal distribution was # not supported correctly. So, in these earlier versions of tensorflow, # the ValueError will be raised, and we manually truncate the # distribution scale. # # It is insufficient to just set distribution to `normal` from the # start, because the `normal` distribution in newer Tensorflow versions # creates a truncated distribution, whereas it created untruncated # distributions in older versions. try: return tf.variance_scaling_initializer( scale=initializer.variance_scaling_initializer.factor, mode=mode.lower(), distribution='truncated_normal') except ValueError: truncate_constant = 0.87962566103423978 truncated_scale = initializer.variance_scaling_initializer.factor / ( truncate_constant * truncate_constant ) return tf.variance_scaling_initializer( scale=truncated_scale, mode=mode.lower(), distribution='normal') else: return slim.variance_scaling_initializer( factor=initializer.variance_scaling_initializer.factor, mode=mode, uniform=initializer.variance_scaling_initializer.uniform) raise ValueError('Unknown initializer function: {}'.format( initializer_oneof)) def _build_batch_norm_params(batch_norm, is_training): """Build a dictionary of batch_norm params from config. Args: batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. is_training: Whether the models is in training mode. Returns: A dictionary containing batch_norm parameters. """ batch_norm_params = { 'decay': batch_norm.decay, 'center': batch_norm.center, 'scale': batch_norm.scale, 'epsilon': batch_norm.epsilon, # Remove is_training parameter from here and deprecate it in the proto # once we refactor Faster RCNN models to set is_training through an outer # arg_scope in the meta architecture. 
'is_training': is_training and batch_norm.train, } return batch_norm_params def _build_keras_batch_norm_params(batch_norm): """Build a dictionary of Keras BatchNormalization params from config. Args: batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto. Returns: A dictionary containing Keras BatchNormalization parameters. """ # Note: Although decay is defined to be 1 - momentum in batch_norm, # decay in the slim batch_norm layers was erroneously defined and is # actually the same as momentum in the Keras batch_norm layers. # For context, see: github.com/keras-team/keras/issues/6839 batch_norm_params = { 'momentum': batch_norm.decay, 'center': batch_norm.center, 'scale': batch_norm.scale, 'epsilon': batch_norm.epsilon, } return batch_norm_params
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/hyperparams_builder.py
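A usage sketch for the slim path of this builder (my own illustration, assuming the standard arg_scope idiom used elsewhere in the Object Detection code): the returned scope_fn is called to construct the arg_scope, and slim conv ops created inside it pick up the configured regularizer, initializer, activation and batch-norm parameters.

import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

slim = tf.contrib.slim

hyperparams_text_proto = """
regularizer {
  l2_regularizer { weight: 0.0004 }
}
initializer {
  truncated_normal_initializer { stddev: 0.03 }
}
activation: RELU_6
batch_norm {
  decay: 0.997
  epsilon: 0.001
}
"""
hyperparams_proto = hyperparams_pb2.Hyperparams()
text_format.Merge(hyperparams_text_proto, hyperparams_proto)

scope_fn = hyperparams_builder.build(hyperparams_proto, is_training=True)
images = tf.placeholder(tf.float32, [None, 64, 64, 3])
with slim.arg_scope(scope_fn()):
  net = slim.conv2d(images, 32, [3, 3])  # inherits the hyperparams above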
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for input_reader_builder.""" import os import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import input_reader_builder from object_detection.core import standard_fields as fields from object_detection.protos import input_reader_pb2 from object_detection.utils import dataset_util class InputReaderBuilderTest(tf.test.TestCase): def create_tf_record(self): path = os.path.join(self.get_temp_dir(), 'tfrecord') writer = tf.python_io.TFRecordWriter(path) image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8) flat_mask = (4 * 5) * [1.0] with self.test_session(): encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval() example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': dataset_util.bytes_feature(encoded_jpeg), 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), 'image/height': dataset_util.int64_feature(4), 'image/width': dataset_util.int64_feature(5), 'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]), 'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]), 'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]), 'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]), 'image/object/class/label': dataset_util.int64_list_feature([2]), 'image/object/mask': dataset_util.float_list_feature(flat_mask), })) writer.write(example.SerializeToString()) writer.close() return path def test_build_tf_record_input_reader(self): tf_record_path = self.create_tf_record() input_reader_text_proto = """ shuffle: false num_readers: 1 tf_record_input_reader {{ input_path: '{0}' }} """.format(tf_record_path) input_reader_proto = input_reader_pb2.InputReader() text_format.Merge(input_reader_text_proto, input_reader_proto) tensor_dict = input_reader_builder.build(input_reader_proto) with tf.train.MonitoredSession() as sess: output_dict = sess.run(tensor_dict) self.assertTrue(fields.InputDataFields.groundtruth_instance_masks not in output_dict) self.assertEquals( (4, 5, 3), output_dict[fields.InputDataFields.image].shape) self.assertEquals( [2], output_dict[fields.InputDataFields.groundtruth_classes]) self.assertEquals( (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) self.assertAllEqual( [0.0, 0.0, 1.0, 1.0], output_dict[fields.InputDataFields.groundtruth_boxes][0]) def test_build_tf_record_input_reader_and_load_instance_masks(self): tf_record_path = self.create_tf_record() input_reader_text_proto = """ shuffle: false num_readers: 1 load_instance_masks: true tf_record_input_reader {{ input_path: '{0}' }} """.format(tf_record_path) input_reader_proto = input_reader_pb2.InputReader() text_format.Merge(input_reader_text_proto, input_reader_proto) tensor_dict = input_reader_builder.build(input_reader_proto) with tf.train.MonitoredSession() as sess: 
output_dict = sess.run(tensor_dict) self.assertEquals( (4, 5, 3), output_dict[fields.InputDataFields.image].shape) self.assertEquals( [2], output_dict[fields.InputDataFields.groundtruth_classes]) self.assertEquals( (1, 4), output_dict[fields.InputDataFields.groundtruth_boxes].shape) self.assertAllEqual( [0.0, 0.0, 1.0, 1.0], output_dict[fields.InputDataFields.groundtruth_boxes][0]) self.assertAllEqual( (1, 4, 5), output_dict[fields.InputDataFields.groundtruth_instance_masks].shape) def test_raises_error_with_no_input_paths(self): input_reader_text_proto = """ shuffle: false num_readers: 1 load_instance_masks: true """ input_reader_proto = input_reader_pb2.InputReader() text_format.Merge(input_reader_text_proto, input_reader_proto) with self.assertRaises(ValueError): input_reader_builder.build(input_reader_proto) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/input_reader_builder_test.py
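A minimal sketch of the builder these tests cover (my own illustration; 'path/to/examples.record' is a placeholder and must point at a real TFRecord of tf.train.Examples, such as the one create_tf_record() above writes):

import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.protos import input_reader_pb2

input_reader_text_proto = """
shuffle: false
num_readers: 1
tf_record_input_reader {
  input_path: 'path/to/examples.record'
}
"""
input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge(input_reader_text_proto, input_reader_proto)

# build() returns a tensor dict with the decoded image, boxes, classes, etc.
tensor_dict = input_reader_builder.build(input_reader_proto)
with tf.train.MonitoredSession() as sess:
  output_dict = sess.run(tensor_dict)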
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """tf.data.Dataset builder. Creates data sources for DetectionModels from an InputReader config. See input_reader.proto for options. Note: If users wishes to also use their own InputReaders with the Object Detection configuration framework, they should define their own builder function that wraps the build function. """ import functools import tensorflow as tf import horovod.tensorflow as hvd from object_detection.data_decoders import tf_example_decoder from object_detection.protos import input_reader_pb2 def make_initializable_iterator(dataset): """Creates an iterator, and initializes tables. This is useful in cases where make_one_shot_iterator wouldn't work because the graph contains a hash table that needs to be initialized. Args: dataset: A `tf.data.Dataset` object. Returns: A `tf.data.Iterator`. """ iterator = dataset.make_initializable_iterator() tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) return iterator def read_dataset(file_read_func, input_files, config): """Reads a dataset, and handles repetition and shuffling. Args: file_read_func: Function to use in tf.contrib.data.parallel_interleave, to read every individual file into a tf.data.Dataset. input_files: A list of file paths to read. config: A input_reader_builder.InputReader object. Returns: A tf.data.Dataset of (undecoded) tf-records based on config. """ # Shard, shuffle, and read files. filenames = tf.gfile.Glob(input_files) if not filenames: raise ValueError('Invalid input path specified in ' '`input_reader_config`.') num_readers = config.num_readers if num_readers > len(filenames): num_readers = len(filenames) tf.logging.warning('num_readers has been reduced to %d to match input file ' 'shards.' 
% num_readers) filename_dataset = tf.data.Dataset.from_tensor_slices(filenames) if config.shuffle: filename_dataset = filename_dataset.shuffle( config.filenames_shuffle_buffer_size) elif num_readers > 1: tf.logging.warning('`shuffle` is false, but the input data stream is ' 'still slightly shuffled since `num_readers` > 1.') filename_dataset = filename_dataset.repeat(config.num_epochs or None) records_dataset = filename_dataset.apply( tf.contrib.data.parallel_interleave( file_read_func, cycle_length=num_readers, block_length=config.read_block_length, sloppy=config.shuffle)) if config.shuffle: records_dataset = records_dataset.shuffle(config.shuffle_buffer_size) return records_dataset def build(input_reader_config, batch_size=None, transform_input_data_fn=None, multi_gpu=True): """Builds a tf.data.Dataset. Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all records. Applies a padded batch to the resulting dataset. Args: input_reader_config: A input_reader_pb2.InputReader object. batch_size: Batch size. If batch size is None, no batching is performed. transform_input_data_fn: Function to apply transformation to all records, or None if no extra decoding is required. Returns: A tf.data.Dataset based on the input_reader_config. Raises: ValueError: On invalid input reader proto. ValueError: If no input paths are specified. """ if not isinstance(input_reader_config, input_reader_pb2.InputReader): raise ValueError('input_reader_config not of type ' 'input_reader_pb2.InputReader.') if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader': config = input_reader_config.tf_record_input_reader if not config.input_path: raise ValueError('At least one input path must be specified in ' '`input_reader_config`.') label_map_proto_file = None if input_reader_config.HasField('label_map_path'): label_map_proto_file = input_reader_config.label_map_path decoder = tf_example_decoder.TfExampleDecoder( load_instance_masks=input_reader_config.load_instance_masks, instance_mask_type=input_reader_config.mask_type, label_map_proto_file=label_map_proto_file, use_display_name=input_reader_config.use_display_name, num_additional_channels=input_reader_config.num_additional_channels) def process_fn(value): """Sets up tf graph that decodes, transforms and pads input data.""" processed_tensors = decoder.decode(value) if transform_input_data_fn is not None: processed_tensors = transform_input_data_fn(processed_tensors) return processed_tensors dataset = read_dataset( functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000), config.input_path[:], input_reader_config) if multi_gpu: dataset = dataset.shard(hvd.size(), hvd.rank()) # TODO(rathodv): make batch size a required argument once the old binaries # are deleted. if batch_size: num_parallel_calls = batch_size * input_reader_config.num_parallel_batches else: num_parallel_calls = input_reader_config.num_parallel_map_calls dataset = dataset.map( process_fn, num_parallel_calls=num_parallel_calls) if batch_size: dataset = dataset.apply( tf.contrib.data.batch_and_drop_remainder(batch_size)) dataset = dataset.prefetch(input_reader_config.num_prefetch_batches) return dataset raise ValueError('Unsupported input_reader_config.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/dataset_builder.py
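A minimal single-process sketch of this builder (my own illustration; the input path is a placeholder). Passing multi_gpu=False skips the hvd.size()/hvd.rank() sharding branch, so Horovod does not need to be initialized:

from google.protobuf import text_format
from object_detection.builders import dataset_builder
from object_detection.protos import input_reader_pb2

input_reader_proto = input_reader_pb2.InputReader()
text_format.Merge("""
shuffle: false
num_readers: 1
tf_record_input_reader {
  input_path: 'path/to/examples.record'
}
""", input_reader_proto)

# build() decodes records, applies the optional transform, and batches with
# batch_and_drop_remainder when batch_size is given.
dataset = dataset_builder.build(
    input_reader_proto, batch_size=2, multi_gpu=False)
iterator = dataset_builder.make_initializable_iterator(dataset)
tensors = iterator.get_next()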
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.builders.image_resizer_builder.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import image_resizer_builder from object_detection.protos import image_resizer_pb2 class ImageResizerBuilderTest(tf.test.TestCase): def _shape_of_resized_random_image_given_text_proto(self, input_shape, text_proto): image_resizer_config = image_resizer_pb2.ImageResizer() text_format.Merge(text_proto, image_resizer_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) images = tf.to_float( tf.random_uniform(input_shape, minval=0, maxval=255, dtype=tf.int32)) resized_images, _ = image_resizer_fn(images) with self.test_session() as sess: return sess.run(resized_images).shape def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self): image_resizer_text_proto = """ keep_aspect_ratio_resizer { min_dimension: 10 max_dimension: 20 } """ input_shape = (50, 25, 3) expected_output_shape = (20, 10, 3) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_build_keep_aspect_ratio_resizer_grayscale(self): image_resizer_text_proto = """ keep_aspect_ratio_resizer { min_dimension: 10 max_dimension: 20 convert_to_grayscale: true } """ input_shape = (50, 25, 3) expected_output_shape = (20, 10, 1) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_build_keep_aspect_ratio_resizer_with_padding(self): image_resizer_text_proto = """ keep_aspect_ratio_resizer { min_dimension: 10 max_dimension: 20 pad_to_max_dimension: true per_channel_pad_value: 3 per_channel_pad_value: 4 per_channel_pad_value: 5 } """ input_shape = (50, 25, 3) expected_output_shape = (20, 20, 3) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_built_fixed_shape_resizer_returns_expected_shape(self): image_resizer_text_proto = """ fixed_shape_resizer { height: 10 width: 20 } """ input_shape = (50, 25, 3) expected_output_shape = (10, 20, 3) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_built_fixed_shape_resizer_grayscale(self): image_resizer_text_proto = """ fixed_shape_resizer { height: 10 width: 20 convert_to_grayscale: true } """ input_shape = (50, 25, 3) expected_output_shape = (10, 20, 1) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def 
test_raises_error_on_invalid_input(self): invalid_input = 'invalid_input' with self.assertRaises(ValueError): image_resizer_builder.build(invalid_input) def _resized_image_given_text_proto(self, image, text_proto): image_resizer_config = image_resizer_pb2.ImageResizer() text_format.Merge(text_proto, image_resizer_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) image_placeholder = tf.placeholder(tf.uint8, [1, None, None, 3]) resized_image, _ = image_resizer_fn(image_placeholder) with self.test_session() as sess: return sess.run(resized_image, feed_dict={image_placeholder: image}) def test_fixed_shape_resizer_nearest_neighbor_method(self): image_resizer_text_proto = """ fixed_shape_resizer { height: 1 width: 1 resize_method: NEAREST_NEIGHBOR } """ image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) image = np.expand_dims(image, axis=2) image = np.tile(image, (1, 1, 3)) image = np.expand_dims(image, axis=0) resized_image = self._resized_image_given_text_proto( image, image_resizer_text_proto) vals = np.unique(resized_image).tolist() self.assertEqual(len(vals), 1) self.assertEqual(vals[0], 1) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/image_resizer_builder_test.py
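A minimal sketch of the resizer builder under test (my own illustration), reproducing the keep-aspect-ratio case the first test above asserts on:

import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2

image_resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge("""
keep_aspect_ratio_resizer {
  min_dimension: 10
  max_dimension: 20
}
""", image_resizer_config)
image_resizer_fn = image_resizer_builder.build(image_resizer_config)

# A 50x25 input keeps its 2:1 aspect ratio and comes out as 20x10x3,
# matching the expected shape in the test above.
image = tf.zeros([50, 25, 3], dtype=tf.float32)
resized_image, true_shape = image_resizer_fn(image)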
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for anchor_generator_builder.""" import math import tensorflow as tf from google.protobuf import text_format from object_detection.anchor_generators import grid_anchor_generator from object_detection.anchor_generators import multiple_grid_anchor_generator from object_detection.anchor_generators import multiscale_grid_anchor_generator from object_detection.builders import anchor_generator_builder from object_detection.protos import anchor_generator_pb2 class AnchorGeneratorBuilderTest(tf.test.TestCase): def assert_almost_list_equal(self, expected_list, actual_list, delta=None): self.assertEqual(len(expected_list), len(actual_list)) for expected_item, actual_item in zip(expected_list, actual_list): self.assertAlmostEqual(expected_item, actual_item, delta=delta) def test_build_grid_anchor_generator_with_defaults(self): anchor_generator_text_proto = """ grid_anchor_generator { } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, grid_anchor_generator.GridAnchorGenerator)) self.assertListEqual(anchor_generator_object._scales, []) self.assertListEqual(anchor_generator_object._aspect_ratios, []) with self.test_session() as sess: base_anchor_size, anchor_offset, anchor_stride = sess.run( [anchor_generator_object._base_anchor_size, anchor_generator_object._anchor_offset, anchor_generator_object._anchor_stride]) self.assertAllEqual(anchor_offset, [0, 0]) self.assertAllEqual(anchor_stride, [16, 16]) self.assertAllEqual(base_anchor_size, [256, 256]) def test_build_grid_anchor_generator_with_non_default_parameters(self): anchor_generator_text_proto = """ grid_anchor_generator { height: 128 width: 512 height_stride: 10 width_stride: 20 height_offset: 30 width_offset: 40 scales: [0.4, 2.2] aspect_ratios: [0.3, 4.5] } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, grid_anchor_generator.GridAnchorGenerator)) self.assert_almost_list_equal(anchor_generator_object._scales, [0.4, 2.2]) self.assert_almost_list_equal(anchor_generator_object._aspect_ratios, [0.3, 4.5]) with self.test_session() as sess: base_anchor_size, anchor_offset, anchor_stride = sess.run( [anchor_generator_object._base_anchor_size, anchor_generator_object._anchor_offset, anchor_generator_object._anchor_stride]) self.assertAllEqual(anchor_offset, [30, 40]) self.assertAllEqual(anchor_stride, [10, 20]) self.assertAllEqual(base_anchor_size, [128, 512]) def test_build_ssd_anchor_generator_with_defaults(self): 
anchor_generator_text_proto = """ ssd_anchor_generator { aspect_ratios: [1.0] } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, multiple_grid_anchor_generator. MultipleGridAnchorGenerator)) for actual_scales, expected_scales in zip( list(anchor_generator_object._scales), [(0.1, 0.2, 0.2), (0.35, 0.418), (0.499, 0.570), (0.649, 0.721), (0.799, 0.871), (0.949, 0.974)]): self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) for actual_aspect_ratio, expected_aspect_ratio in zip( list(anchor_generator_object._aspect_ratios), [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]): self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) with self.test_session() as sess: base_anchor_size = sess.run(anchor_generator_object._base_anchor_size) self.assertAllClose(base_anchor_size, [1.0, 1.0]) def test_build_ssd_anchor_generator_with_custom_scales(self): anchor_generator_text_proto = """ ssd_anchor_generator { aspect_ratios: [1.0] scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8] reduce_boxes_in_lowest_layer: false } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, multiple_grid_anchor_generator. MultipleGridAnchorGenerator)) for actual_scales, expected_scales in zip( list(anchor_generator_object._scales), [(0.1, math.sqrt(0.1 * 0.15)), (0.15, math.sqrt(0.15 * 0.2)), (0.2, math.sqrt(0.2 * 0.4)), (0.4, math.sqrt(0.4 * 0.6)), (0.6, math.sqrt(0.6 * 0.8)), (0.8, math.sqrt(0.8 * 1.0))]): self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self): anchor_generator_text_proto = """ ssd_anchor_generator { aspect_ratios: [0.5] interpolated_scale_aspect_ratio: 0.5 reduce_boxes_in_lowest_layer: false } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, multiple_grid_anchor_generator. MultipleGridAnchorGenerator)) for actual_aspect_ratio, expected_aspect_ratio in zip( list(anchor_generator_object._aspect_ratios), 6 * [(0.5, 0.5)]): self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) def test_build_ssd_anchor_generator_without_reduced_boxes(self): anchor_generator_text_proto = """ ssd_anchor_generator { aspect_ratios: [1.0] reduce_boxes_in_lowest_layer: false } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, multiple_grid_anchor_generator. 
MultipleGridAnchorGenerator)) for actual_scales, expected_scales in zip( list(anchor_generator_object._scales), [(0.2, 0.264), (0.35, 0.418), (0.499, 0.570), (0.649, 0.721), (0.799, 0.871), (0.949, 0.974)]): self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) for actual_aspect_ratio, expected_aspect_ratio in zip( list(anchor_generator_object._aspect_ratios), 6 * [(1.0, 1.0)]): self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) with self.test_session() as sess: base_anchor_size = sess.run(anchor_generator_object._base_anchor_size) self.assertAllClose(base_anchor_size, [1.0, 1.0]) def test_build_ssd_anchor_generator_with_non_default_parameters(self): anchor_generator_text_proto = """ ssd_anchor_generator { num_layers: 2 min_scale: 0.3 max_scale: 0.8 aspect_ratios: [2.0] height_stride: 16 height_stride: 32 width_stride: 20 width_stride: 30 height_offset: 8 height_offset: 16 width_offset: 0 width_offset: 10 } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, multiple_grid_anchor_generator. MultipleGridAnchorGenerator)) for actual_scales, expected_scales in zip( list(anchor_generator_object._scales), [(0.1, 0.3, 0.3), (0.8, 0.894)]): self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2) for actual_aspect_ratio, expected_aspect_ratio in zip( list(anchor_generator_object._aspect_ratios), [(1.0, 2.0, 0.5), (2.0, 1.0)]): self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio) for actual_strides, expected_strides in zip( list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]): self.assert_almost_list_equal(expected_strides, actual_strides) for actual_offsets, expected_offsets in zip( list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]): self.assert_almost_list_equal(expected_offsets, actual_offsets) with self.test_session() as sess: base_anchor_size = sess.run(anchor_generator_object._base_anchor_size) self.assertAllClose(base_anchor_size, [1.0, 1.0]) def test_raise_value_error_on_empty_anchor_genertor(self): anchor_generator_text_proto = """ """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) with self.assertRaises(ValueError): anchor_generator_builder.build(anchor_generator_proto) def test_build_multiscale_anchor_generator_custom_aspect_ratios(self): anchor_generator_text_proto = """ multiscale_anchor_generator { aspect_ratios: [1.0] } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, multiscale_grid_anchor_generator. 
MultiscaleGridAnchorGenerator)) for level, anchor_grid_info in zip( range(3, 8), anchor_generator_object._anchor_grid_info): self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info'])) self.assertEqual(level, anchor_grid_info['level']) self.assertEqual(len(anchor_grid_info['info']), 4) self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5]) self.assertAllClose(anchor_grid_info['info'][1], [1.0]) self.assertAllClose(anchor_grid_info['info'][2], [4.0 * 2**level, 4.0 * 2**level]) self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level]) self.assertTrue(anchor_generator_object._normalize_coordinates) def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates( self): anchor_generator_text_proto = """ multiscale_anchor_generator { aspect_ratios: [1.0] normalize_coordinates: false } """ anchor_generator_proto = anchor_generator_pb2.AnchorGenerator() text_format.Merge(anchor_generator_text_proto, anchor_generator_proto) anchor_generator_object = anchor_generator_builder.build( anchor_generator_proto) self.assertTrue(isinstance(anchor_generator_object, multiscale_grid_anchor_generator. MultiscaleGridAnchorGenerator)) self.assertFalse(anchor_generator_object._normalize_coordinates) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/anchor_generator_builder_test.py
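A sketch of how a built SSD anchor generator is consumed (my own illustration; it assumes the generate() interface of MultipleGridAnchorGenerator, which takes one (height, width) grid shape per layer plus the image size). The exact return structure, a BoxList or a list of BoxLists, depends on the library version:

from google.protobuf import text_format
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2

anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge("""
ssd_anchor_generator {
  num_layers: 2
  aspect_ratios: [1.0]
}
""", anchor_generator_proto)
anchor_generator = anchor_generator_builder.build(anchor_generator_proto)

# One grid shape per layer; im_height/im_width let the generator produce
# anchors in absolute rather than normalized coordinates.
anchors = anchor_generator.generate(
    feature_map_shape_list=[(19, 19), (10, 10)], im_height=300, im_width=300)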
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build an object detection anchor generator from config.""" from object_detection.anchor_generators import grid_anchor_generator from object_detection.anchor_generators import multiple_grid_anchor_generator from object_detection.anchor_generators import multiscale_grid_anchor_generator from object_detection.protos import anchor_generator_pb2 def build(anchor_generator_config): """Builds an anchor generator based on the config. Args: anchor_generator_config: An anchor_generator.proto object containing the config for the desired anchor generator. Returns: Anchor generator based on the config. Raises: ValueError: On empty anchor generator proto. """ if not isinstance(anchor_generator_config, anchor_generator_pb2.AnchorGenerator): raise ValueError('anchor_generator_config not of type ' 'anchor_generator_pb2.AnchorGenerator') if anchor_generator_config.WhichOneof( 'anchor_generator_oneof') == 'grid_anchor_generator': grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator return grid_anchor_generator.GridAnchorGenerator( scales=[float(scale) for scale in grid_anchor_generator_config.scales], aspect_ratios=[float(aspect_ratio) for aspect_ratio in grid_anchor_generator_config.aspect_ratios], base_anchor_size=[grid_anchor_generator_config.height, grid_anchor_generator_config.width], anchor_stride=[grid_anchor_generator_config.height_stride, grid_anchor_generator_config.width_stride], anchor_offset=[grid_anchor_generator_config.height_offset, grid_anchor_generator_config.width_offset]) elif anchor_generator_config.WhichOneof( 'anchor_generator_oneof') == 'ssd_anchor_generator': ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator anchor_strides = None if ssd_anchor_generator_config.height_stride: anchor_strides = zip(ssd_anchor_generator_config.height_stride, ssd_anchor_generator_config.width_stride) anchor_offsets = None if ssd_anchor_generator_config.height_offset: anchor_offsets = zip(ssd_anchor_generator_config.height_offset, ssd_anchor_generator_config.width_offset) return multiple_grid_anchor_generator.create_ssd_anchors( num_layers=ssd_anchor_generator_config.num_layers, min_scale=ssd_anchor_generator_config.min_scale, max_scale=ssd_anchor_generator_config.max_scale, scales=[float(scale) for scale in ssd_anchor_generator_config.scales], aspect_ratios=ssd_anchor_generator_config.aspect_ratios, interpolated_scale_aspect_ratio=( ssd_anchor_generator_config.interpolated_scale_aspect_ratio), base_anchor_size=[ ssd_anchor_generator_config.base_anchor_height, ssd_anchor_generator_config.base_anchor_width ], anchor_strides=anchor_strides, anchor_offsets=anchor_offsets, reduce_boxes_in_lowest_layer=( ssd_anchor_generator_config.reduce_boxes_in_lowest_layer)) elif anchor_generator_config.WhichOneof( 'anchor_generator_oneof') == 'multiscale_anchor_generator': cfg = 
anchor_generator_config.multiscale_anchor_generator return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator( cfg.min_level, cfg.max_level, cfg.anchor_scale, [float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios], cfg.scales_per_octave, cfg.normalize_coordinates ) else: raise ValueError('Empty anchor generator.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/anchor_generator_builder.py
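A sketch of the grid_anchor_generator branch of build() above (my own illustration): a Faster R-CNN style generator that lays anchors over a single feature-map grid. As above, whether generate() returns a BoxList or a one-element list of BoxLists depends on the library version:

from google.protobuf import text_format
from object_detection.builders import anchor_generator_builder
from object_detection.protos import anchor_generator_pb2

anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()
text_format.Merge("""
grid_anchor_generator {
  scales: [0.5, 1.0, 2.0]
  aspect_ratios: [0.5, 1.0, 2.0]
}
""", anchor_generator_proto)
anchor_generator = anchor_generator_builder.build(anchor_generator_proto)

# 3 scales x 3 aspect ratios = 9 anchors per grid cell, laid out with the
# default 16-pixel stride and 256x256 base anchor size.
anchors = anchor_generator.generate(feature_map_shape_list=[(14, 14)])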
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Function to build box predictor from configuration.""" import collections import tensorflow as tf from object_detection.predictors import convolutional_box_predictor from object_detection.predictors import convolutional_keras_box_predictor from object_detection.predictors import mask_rcnn_box_predictor from object_detection.predictors import rfcn_box_predictor from object_detection.predictors.heads import box_head from object_detection.predictors.heads import class_head from object_detection.predictors.heads import keras_box_head from object_detection.predictors.heads import keras_class_head from object_detection.predictors.heads import mask_head from object_detection.protos import box_predictor_pb2 def build_convolutional_box_predictor(is_training, num_classes, conv_hyperparams_fn, min_depth, max_depth, num_layers_before_predictor, use_dropout, dropout_keep_prob, kernel_size, box_code_size, apply_sigmoid_to_scores=False, add_background_class=True, class_prediction_bias_init=0.0, use_depthwise=False,): """Builds the ConvolutionalBoxPredictor from the arguments. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. num_layers_before_predictor: Number of the additional conv layers before the predictor. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). box_code_size: Size of encoding for each box. apply_sigmoid_to_scores: If True, apply the sigmoid on the output class_predictions. add_background_class: Whether to add an implicit background class. class_prediction_bias_init: Constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. Returns: A ConvolutionalBoxPredictor class. 
""" box_prediction_head = box_head.ConvolutionalBoxHead( is_training=is_training, box_code_size=box_code_size, kernel_size=kernel_size, use_depthwise=use_depthwise) class_prediction_head = class_head.ConvolutionalClassHead( is_training=is_training, num_class_slots=num_classes + 1 if add_background_class else num_classes, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, kernel_size=kernel_size, apply_sigmoid_to_scores=apply_sigmoid_to_scores, class_prediction_bias_init=class_prediction_bias_init, use_depthwise=use_depthwise) other_heads = {} return convolutional_box_predictor.ConvolutionalBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, other_heads=other_heads, conv_hyperparams_fn=conv_hyperparams_fn, num_layers_before_predictor=num_layers_before_predictor, min_depth=min_depth, max_depth=max_depth) def build_convolutional_keras_box_predictor(is_training, num_classes, conv_hyperparams, freeze_batchnorm, inplace_batchnorm_update, num_predictions_per_location_list, min_depth, max_depth, num_layers_before_predictor, use_dropout, dropout_keep_prob, kernel_size, box_code_size, add_background_class=True, class_prediction_bias_init=0.0, use_depthwise=False, name='BoxPredictor'): """Builds the Keras ConvolutionalBoxPredictor from the arguments. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. num_layers_before_predictor: Number of the additional conv layers before the predictor. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). box_code_size: Size of encoding for each box. add_background_class: Whether to add an implicit background class. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. 
use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. name: A string name scope to assign to the box predictor. If `None`, Keras will auto-generate one from the class name. Returns: A Keras ConvolutionalBoxPredictor class. """ box_prediction_heads = [] class_prediction_heads = [] other_heads = {} for stack_index, num_predictions_per_location in enumerate( num_predictions_per_location_list): box_prediction_heads.append( keras_box_head.ConvolutionalBoxHead( is_training=is_training, box_code_size=box_code_size, kernel_size=kernel_size, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, num_predictions_per_location=num_predictions_per_location, use_depthwise=use_depthwise, name='ConvolutionalBoxHead_%d' % stack_index)) class_prediction_heads.append( keras_class_head.ConvolutionalClassHead( is_training=is_training, num_class_slots=( num_classes + 1 if add_background_class else num_classes), use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, kernel_size=kernel_size, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, num_predictions_per_location=num_predictions_per_location, class_prediction_bias_init=class_prediction_bias_init, use_depthwise=use_depthwise, name='ConvolutionalClassHead_%d' % stack_index)) return convolutional_keras_box_predictor.ConvolutionalBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_heads=box_prediction_heads, class_prediction_heads=class_prediction_heads, other_heads=other_heads, conv_hyperparams=conv_hyperparams, num_layers_before_predictor=num_layers_before_predictor, min_depth=min_depth, max_depth=max_depth, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, name=name) def build_weight_shared_convolutional_box_predictor( is_training, num_classes, conv_hyperparams_fn, depth, num_layers_before_predictor, box_code_size, kernel_size=3, add_background_class=True, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, share_prediction_tower=False, apply_batch_norm=True, use_depthwise=False, score_converter_fn=tf.identity, box_encodings_clip_range=None): """Builds and returns a WeightSharedConvolutionalBoxPredictor class. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. add_background_class: Whether to add an implicit background class. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activations. share_prediction_tower: Whether to share the multi-layer tower between box prediction and class prediction heads. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. use_depthwise: Whether to use depthwise separable conv2d instead of conv2d.
score_converter_fn: Callable score converter to perform elementwise op on class scores. box_encodings_clip_range: Min and max values for clipping the box_encodings. Returns: A WeightSharedConvolutionalBoxPredictor class. """ box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( box_code_size=box_code_size, kernel_size=kernel_size, use_depthwise=use_depthwise, box_encodings_clip_range=box_encodings_clip_range) class_prediction_head = ( class_head.WeightSharedConvolutionalClassHead( num_class_slots=( num_classes + 1 if add_background_class else num_classes), kernel_size=kernel_size, class_prediction_bias_init=class_prediction_bias_init, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, use_depthwise=use_depthwise, score_converter_fn=score_converter_fn)) other_heads = {} return convolutional_box_predictor.WeightSharedConvolutionalBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, other_heads=other_heads, conv_hyperparams_fn=conv_hyperparams_fn, depth=depth, num_layers_before_predictor=num_layers_before_predictor, kernel_size=kernel_size, apply_batch_norm=apply_batch_norm, share_prediction_tower=share_prediction_tower, use_depthwise=use_depthwise) def build_mask_rcnn_box_predictor(is_training, num_classes, fc_hyperparams_fn, use_dropout, dropout_keep_prob, box_code_size, add_background_class=True, share_box_across_classes=False, predict_instance_masks=False, conv_hyperparams_fn=None, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample_masks=False): """Builds and returns a MaskRCNNBoxPredictor class. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor above. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. add_background_class: Whether to add an implicit background class. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. predict_instance_masks: If True, will add a third stage mask prediction to the returned class. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not.
convolve_then_upsample_masks: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. Returns: A MaskRCNNBoxPredictor class. """ box_prediction_head = box_head.MaskRCNNBoxHead( is_training=is_training, num_classes=num_classes, fc_hyperparams_fn=fc_hyperparams_fn, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob, box_code_size=box_code_size, share_box_across_classes=share_box_across_classes) class_prediction_head = class_head.MaskRCNNClassHead( is_training=is_training, num_class_slots=num_classes + 1 if add_background_class else num_classes, fc_hyperparams_fn=fc_hyperparams_fn, use_dropout=use_dropout, dropout_keep_prob=dropout_keep_prob) third_stage_heads = {} if predict_instance_masks: third_stage_heads[ mask_rcnn_box_predictor. MASK_PREDICTIONS] = mask_head.MaskRCNNMaskHead( num_classes=num_classes, conv_hyperparams_fn=conv_hyperparams_fn, mask_height=mask_height, mask_width=mask_width, mask_prediction_num_conv_layers=mask_prediction_num_conv_layers, mask_prediction_conv_depth=mask_prediction_conv_depth, masks_are_class_agnostic=masks_are_class_agnostic, convolve_then_upsample=convolve_then_upsample_masks) return mask_rcnn_box_predictor.MaskRCNNBoxPredictor( is_training=is_training, num_classes=num_classes, box_prediction_head=box_prediction_head, class_prediction_head=class_prediction_head, third_stage_heads=third_stage_heads) def build_score_converter(score_converter_config, is_training): """Builds score converter based on the config. Builds one of [tf.identity, tf.sigmoid] score converters based on the config and whether the BoxPredictor is for training or inference. Args: score_converter_config: box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter. is_training: Indicates whether the BoxPredictor is in training mode. Returns: Callable score converter op. Raises: ValueError: On unknown score converter. """ if score_converter_config == ( box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY): return tf.identity if score_converter_config == ( box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID): return tf.identity if is_training else tf.sigmoid raise ValueError('Unknown score converter.') BoxEncodingsClipRange = collections.namedtuple('BoxEncodingsClipRange', ['min', 'max']) def build(argscope_fn, box_predictor_config, is_training, num_classes, add_background_class=True): """Builds box predictor based on the configuration. Builds box predictor based on the configuration. See box_predictor.proto for configurable options. Also, see box_predictor.py for more details. Args: argscope_fn: A function that takes the following inputs: * hyperparams_pb2.Hyperparams proto * a boolean indicating if the model is in training mode. and returns a tf-slim arg_scope for Conv and FC hyperparameters. box_predictor_config: box_predictor_pb2.BoxPredictor proto containing configuration. is_training: Whether the model is in training mode. num_classes: Number of classes to predict. add_background_class: Whether to add an implicit background class. Returns: box_predictor: box_predictor.BoxPredictor object. Raises: ValueError: On unknown box predictor.
""" if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): raise ValueError('box_predictor_config not of type ' 'box_predictor_pb2.BoxPredictor.') box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') if box_predictor_oneof == 'convolutional_box_predictor': config_box_predictor = box_predictor_config.convolutional_box_predictor conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, is_training) return build_convolutional_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, conv_hyperparams_fn=conv_hyperparams_fn, use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, box_code_size=config_box_predictor.box_code_size, kernel_size=config_box_predictor.kernel_size, num_layers_before_predictor=( config_box_predictor.num_layers_before_predictor), min_depth=config_box_predictor.min_depth, max_depth=config_box_predictor.max_depth, apply_sigmoid_to_scores=config_box_predictor.apply_sigmoid_to_scores, class_prediction_bias_init=( config_box_predictor.class_prediction_bias_init), use_depthwise=config_box_predictor.use_depthwise) if box_predictor_oneof == 'weight_shared_convolutional_box_predictor': config_box_predictor = ( box_predictor_config.weight_shared_convolutional_box_predictor) conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, is_training) apply_batch_norm = config_box_predictor.conv_hyperparams.HasField( 'batch_norm') # During training phase, logits are used to compute the loss. Only apply # sigmoid at inference to make the inference graph TPU friendly. score_converter_fn = build_score_converter( config_box_predictor.score_converter, is_training) # Optionally apply clipping to box encodings, when box_encodings_clip_range # is set. 
box_encodings_clip_range = ( BoxEncodingsClipRange( min=config_box_predictor.box_encodings_clip_range.min, max=config_box_predictor.box_encodings_clip_range.max) if config_box_predictor.HasField('box_encodings_clip_range') else None) return build_weight_shared_convolutional_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, conv_hyperparams_fn=conv_hyperparams_fn, depth=config_box_predictor.depth, num_layers_before_predictor=( config_box_predictor.num_layers_before_predictor), box_code_size=config_box_predictor.box_code_size, kernel_size=config_box_predictor.kernel_size, class_prediction_bias_init=( config_box_predictor.class_prediction_bias_init), use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, share_prediction_tower=config_box_predictor.share_prediction_tower, apply_batch_norm=apply_batch_norm, use_depthwise=config_box_predictor.use_depthwise, score_converter_fn=score_converter_fn, box_encodings_clip_range=box_encodings_clip_range) if box_predictor_oneof == 'mask_rcnn_box_predictor': config_box_predictor = box_predictor_config.mask_rcnn_box_predictor fc_hyperparams_fn = argscope_fn(config_box_predictor.fc_hyperparams, is_training) conv_hyperparams_fn = None if config_box_predictor.HasField('conv_hyperparams'): conv_hyperparams_fn = argscope_fn( config_box_predictor.conv_hyperparams, is_training) return build_mask_rcnn_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, fc_hyperparams_fn=fc_hyperparams_fn, use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, box_code_size=config_box_predictor.box_code_size, share_box_across_classes=( config_box_predictor.share_box_across_classes), predict_instance_masks=config_box_predictor.predict_instance_masks, conv_hyperparams_fn=conv_hyperparams_fn, mask_height=config_box_predictor.mask_height, mask_width=config_box_predictor.mask_width, mask_prediction_num_conv_layers=( config_box_predictor.mask_prediction_num_conv_layers), mask_prediction_conv_depth=( config_box_predictor.mask_prediction_conv_depth), masks_are_class_agnostic=( config_box_predictor.masks_are_class_agnostic), convolve_then_upsample_masks=( config_box_predictor.convolve_then_upsample_masks)) if box_predictor_oneof == 'rfcn_box_predictor': config_box_predictor = box_predictor_config.rfcn_box_predictor conv_hyperparams_fn = argscope_fn(config_box_predictor.conv_hyperparams, is_training) box_predictor_object = rfcn_box_predictor.RfcnBoxPredictor( is_training=is_training, num_classes=num_classes, conv_hyperparams_fn=conv_hyperparams_fn, crop_size=[config_box_predictor.crop_height, config_box_predictor.crop_width], num_spatial_bins=[config_box_predictor.num_spatial_bins_height, config_box_predictor.num_spatial_bins_width], depth=config_box_predictor.depth, box_code_size=config_box_predictor.box_code_size) return box_predictor_object raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof)) def build_keras(conv_hyperparams_fn, freeze_batchnorm, inplace_batchnorm_update, num_predictions_per_location_list, box_predictor_config, is_training, num_classes, add_background_class=True): """Builds a Keras-based box predictor based on the configuration. Builds Keras-based box predictor based on the configuration. See box_predictor.proto for configurable options. Also, see box_predictor.py for more details. 
Args: conv_hyperparams_fn: A function that takes a hyperparams_pb2.Hyperparams proto and returns a `hyperparams_builder.KerasLayerHyperparams` for Conv or FC hyperparameters. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false, the train op must add a control dependency on the tf.GraphKeys.UPDATE_OPS collection in order to update batch norm statistics. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. box_predictor_config: box_predictor_pb2.BoxPredictor proto containing configuration. is_training: Whether the model is in training mode. num_classes: Number of classes to predict. add_background_class: Whether to add an implicit background class. Returns: box_predictor: box_predictor.KerasBoxPredictor object. Raises: ValueError: On unknown box predictor, or one with no Keras box predictor. """ if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor): raise ValueError('box_predictor_config not of type ' 'box_predictor_pb2.BoxPredictor.') box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof') if box_predictor_oneof == 'convolutional_box_predictor': config_box_predictor = box_predictor_config.convolutional_box_predictor conv_hyperparams = conv_hyperparams_fn( config_box_predictor.conv_hyperparams) return build_convolutional_keras_box_predictor( is_training=is_training, num_classes=num_classes, add_background_class=add_background_class, conv_hyperparams=conv_hyperparams, freeze_batchnorm=freeze_batchnorm, inplace_batchnorm_update=inplace_batchnorm_update, num_predictions_per_location_list=num_predictions_per_location_list, use_dropout=config_box_predictor.use_dropout, dropout_keep_prob=config_box_predictor.dropout_keep_probability, box_code_size=config_box_predictor.box_code_size, kernel_size=config_box_predictor.kernel_size, num_layers_before_predictor=( config_box_predictor.num_layers_before_predictor), min_depth=config_box_predictor.min_depth, max_depth=config_box_predictor.max_depth, class_prediction_bias_init=( config_box_predictor.class_prediction_bias_init), use_depthwise=config_box_predictor.use_depthwise) raise ValueError( 'Unknown box predictor for Keras: {}'.format(box_predictor_oneof))
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/box_predictor_builder.py
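A minimal, untested sketch of driving `build` above from a text-format config. The hyperparameter values are arbitrary examples, and `hyperparams_builder.build` is assumed as the `argscope_fn` (its signature matches, as the hyperparams tests later in this dump show):

# Sketch only: wires box_predictor_builder.build to a text-format proto.
# The regularizer/initializer settings are illustrative, not required values.
from google.protobuf import text_format

from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.protos import box_predictor_pb2

config = box_predictor_pb2.BoxPredictor()
text_format.Merge("""
  convolutional_box_predictor {
    conv_hyperparams {
      regularizer { l2_regularizer { weight: 0.0004 } }
      initializer { truncated_normal_initializer { stddev: 0.03 } }
    }
    kernel_size: 3
    box_code_size: 4
  }
""", config)

# argscope_fn receives (hyperparams_pb2.Hyperparams, is_training) and must
# return a tf-slim arg_scope function; hyperparams_builder.build matches that.
box_predictor = box_predictor_builder.build(
    argscope_fn=hyperparams_builder.build,
    box_predictor_config=config,
    is_training=True,
    num_classes=90)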
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builder for region similarity calculators.""" from object_detection.core import region_similarity_calculator from object_detection.protos import region_similarity_calculator_pb2 def build(region_similarity_calculator_config): """Builds region similarity calculator based on the configuration. Builds one of [IouSimilarity, IoaSimilarity, NegSqDistSimilarity] objects. See protos/region_similarity_calculator.proto for details. Args: region_similarity_calculator_config: RegionSimilarityCalculator configuration proto. Returns: region_similarity_calculator: RegionSimilarityCalculator object. Raises: ValueError: On unknown region similarity calculator. """ if not isinstance( region_similarity_calculator_config, region_similarity_calculator_pb2.RegionSimilarityCalculator): raise ValueError( 'region_similarity_calculator_config not of type ' 'region_similarity_calculator_pb2.RegionSimilarityCalculator') similarity_calculator = region_similarity_calculator_config.WhichOneof( 'region_similarity') if similarity_calculator == 'iou_similarity': return region_similarity_calculator.IouSimilarity() if similarity_calculator == 'ioa_similarity': return region_similarity_calculator.IoaSimilarity() if similarity_calculator == 'neg_sq_dist_similarity': return region_similarity_calculator.NegSqDistSimilarity() if similarity_calculator == 'thresholded_iou_similarity': return region_similarity_calculator.ThresholdedIouSimilarity( region_similarity_calculator_config.thresholded_iou_similarity.threshold ) raise ValueError('Unknown region similarity calculator.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/region_similarity_calculator_builder.py
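An untested usage sketch for the similarity builder above; the box coordinates are arbitrary examples:

# Sketch: build an IoU similarity calculator from a text-format proto and
# compare two small box sets.
import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import region_similarity_calculator_builder
from object_detection.core import box_list
from object_detection.protos import region_similarity_calculator_pb2

config = region_similarity_calculator_pb2.RegionSimilarityCalculator()
text_format.Merge('iou_similarity { }', config)
similarity_calc = region_similarity_calculator_builder.build(config)

boxes1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
boxes2 = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5]]))
# compare() returns an [N, M] pairwise similarity matrix (here, IoU).
iou_matrix = similarity_calc.compare(boxes1, boxes2)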
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builder function for image resizing operations.""" import functools import tensorflow as tf from object_detection.core import preprocessor from object_detection.protos import image_resizer_pb2 def _tf_resize_method(resize_method): """Maps image resize method from enumeration type to TensorFlow. Args: resize_method: The resize_method attribute of keep_aspect_ratio_resizer or fixed_shape_resizer. Returns: method: The corresponding TensorFlow ResizeMethod. Raises: ValueError: if `resize_method` is of unknown type. """ dict_method = { image_resizer_pb2.BILINEAR: tf.image.ResizeMethod.BILINEAR, image_resizer_pb2.NEAREST_NEIGHBOR: tf.image.ResizeMethod.NEAREST_NEIGHBOR, image_resizer_pb2.BICUBIC: tf.image.ResizeMethod.BICUBIC, image_resizer_pb2.AREA: tf.image.ResizeMethod.AREA } if resize_method in dict_method: return dict_method[resize_method] else: raise ValueError('Unknown resize_method') def build(image_resizer_config): """Builds callable for image resizing operations. Args: image_resizer_config: image_resizer.proto object containing parameters for an image resizing operation. Returns: image_resizer_fn: Callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. Raises: ValueError: if `image_resizer_config` is of incorrect type. ValueError: if `image_resizer_config.image_resizer_oneof` is of unexpected type. ValueError: if min_dimension > max_dimension when keep_aspect_ratio_resizer is used. """ if not isinstance(image_resizer_config, image_resizer_pb2.ImageResizer): raise ValueError('image_resizer_config not of type ' 'image_resizer_pb2.ImageResizer.') image_resizer_oneof = image_resizer_config.WhichOneof('image_resizer_oneof') if image_resizer_oneof == 'keep_aspect_ratio_resizer': keep_aspect_ratio_config = image_resizer_config.keep_aspect_ratio_resizer if not (keep_aspect_ratio_config.min_dimension <= keep_aspect_ratio_config.max_dimension): raise ValueError('min_dimension > max_dimension') method = _tf_resize_method(keep_aspect_ratio_config.resize_method) per_channel_pad_value = (0, 0, 0) if keep_aspect_ratio_config.per_channel_pad_value: per_channel_pad_value = tuple(keep_aspect_ratio_config.
per_channel_pad_value) image_resizer_fn = functools.partial( preprocessor.resize_to_range, min_dimension=keep_aspect_ratio_config.min_dimension, max_dimension=keep_aspect_ratio_config.max_dimension, method=method, pad_to_max_dimension=keep_aspect_ratio_config.pad_to_max_dimension, per_channel_pad_value=per_channel_pad_value) if not keep_aspect_ratio_config.convert_to_grayscale: return image_resizer_fn elif image_resizer_oneof == 'fixed_shape_resizer': fixed_shape_resizer_config = image_resizer_config.fixed_shape_resizer method = _tf_resize_method(fixed_shape_resizer_config.resize_method) image_resizer_fn = functools.partial( preprocessor.resize_image, new_height=fixed_shape_resizer_config.height, new_width=fixed_shape_resizer_config.width, method=method) if not fixed_shape_resizer_config.convert_to_grayscale: return image_resizer_fn else: raise ValueError( 'Invalid image resizer option: \'%s\'.' % image_resizer_oneof) def grayscale_image_resizer(image, masks=None): """Convert to grayscale before applying image_resizer_fn. Args: image: A 3D tensor of shape [height, width, 3] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A 3D tensor of shape [new_height, new_width, 1], where the image has been resized (with bilinear interpolation) so that min(new_height, new_width) == min_dimension or max(new_height, new_width) == max_dimension. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width]. resized_image_shape: A 1D tensor of shape [3] containing shape of the resized image. """ # image_resizer_fn returns [resized_image, resized_image_shape] if # mask==None, otherwise it returns # [resized_image, resized_mask, resized_image_shape]. In either case, we # only deal with first and last element of the returned list. retval = image_resizer_fn(image, masks) resized_image = retval[0] resized_image_shape = retval[-1] retval[0] = preprocessor.rgb_to_gray(resized_image) retval[-1] = tf.concat([resized_image_shape[:-1], [1]], 0) return retval return functools.partial(grayscale_image_resizer)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/image_resizer_builder.py
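A short, untested sketch of the resizer builder above; the 300x300 target and the dummy input shape are arbitrary example values:

# Sketch: build a fixed-shape resizer and apply it to a dummy image.
import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2

config = image_resizer_pb2.ImageResizer()
text_format.Merge('fixed_shape_resizer { height: 300 width: 300 }', config)
image_resizer_fn = image_resizer_builder.build(config)

# Per the comments in the source above, without masks the callable returns
# [resized_image, resized_image_shape].
image = tf.zeros([480, 640, 3], dtype=tf.float32)
resized_image, resized_image_shape = image_resizer_fn(image)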
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests object_detection.builders.hyperparams_builder.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.core import freezable_batch_norm from object_detection.protos import hyperparams_pb2 slim = tf.contrib.slim def _get_scope_key(op): return getattr(op, '_key_op', str(op)) class HyperparamsBuilderTest(tf.test.TestCase): def test_default_arg_scope_has_conv2d_op(self): conv_hyperparams_text_proto = """ regularizer { l1_regularizer { } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() self.assertTrue(_get_scope_key(slim.conv2d) in scope) def test_default_arg_scope_has_separable_conv2d_op(self): conv_hyperparams_text_proto = """ regularizer { l1_regularizer { } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() self.assertTrue(_get_scope_key(slim.separable_conv2d) in scope) def test_default_arg_scope_has_conv2d_transpose_op(self): conv_hyperparams_text_proto = """ regularizer { l1_regularizer { } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() self.assertTrue(_get_scope_key(slim.conv2d_transpose) in scope) def test_explicit_fc_op_arg_scope_has_fully_connected_op(self): conv_hyperparams_text_proto = """ op: FC regularizer { l1_regularizer { } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() self.assertTrue(_get_scope_key(slim.fully_connected) in scope) def test_separable_conv2d_and_conv2d_and_transpose_have_same_parameters(self): conv_hyperparams_text_proto = """ regularizer { l1_regularizer { } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() kwargs_1, kwargs_2, kwargs_3 = scope.values() self.assertDictEqual(kwargs_1, kwargs_2) self.assertDictEqual(kwargs_1, kwargs_3) def
test_return_l1_regularized_weights(self): conv_hyperparams_text_proto = """ regularizer { l1_regularizer { weight: 0.5 } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope.values()[0] regularizer = conv_scope_arguments['weights_regularizer'] weights = np.array([1., -1, 4., 2.]) with self.test_session() as sess: result = sess.run(regularizer(tf.constant(weights))) self.assertAllClose(np.abs(weights).sum() * 0.5, result) def test_return_l1_regularized_weights_keras(self): conv_hyperparams_text_proto = """ regularizer { l1_regularizer { weight: 0.5 } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) regularizer = keras_config.params()['kernel_regularizer'] weights = np.array([1., -1, 4., 2.]) with self.test_session() as sess: result = sess.run(regularizer(tf.constant(weights))) self.assertAllClose(np.abs(weights).sum() * 0.5, result) def test_return_l2_regularizer_weights(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { weight: 0.42 } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] regularizer = conv_scope_arguments['weights_regularizer'] weights = np.array([1., -1, 4., 2.]) with self.test_session() as sess: result = sess.run(regularizer(tf.constant(weights))) self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result) def test_return_l2_regularizer_weights_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { weight: 0.42 } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) regularizer = keras_config.params()['kernel_regularizer'] weights = np.array([1., -1, 4., 2.]) with self.test_session() as sess: result = sess.run(regularizer(tf.constant(weights))) self.assertAllClose(np.power(weights, 2).sum() / 2.0 * 0.42, result) def test_return_non_default_batch_norm_params_with_train_during_train(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } batch_norm { decay: 0.7 center: false scale: true epsilon: 0.03 train: true } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) batch_norm_params = scope[_get_scope_key(slim.batch_norm)] self.assertAlmostEqual(batch_norm_params['decay'], 0.7) self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) self.assertFalse(batch_norm_params['center']) 
self.assertTrue(batch_norm_params['scale']) self.assertTrue(batch_norm_params['is_training']) def test_return_non_default_batch_norm_params_keras( self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } batch_norm { decay: 0.7 center: false scale: true epsilon: 0.03 } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) self.assertTrue(keras_config.use_batch_norm()) batch_norm_params = keras_config.batch_norm_params() self.assertAlmostEqual(batch_norm_params['momentum'], 0.7) self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) self.assertFalse(batch_norm_params['center']) self.assertTrue(batch_norm_params['scale']) batch_norm_layer = keras_config.build_batch_norm() self.assertTrue(isinstance(batch_norm_layer, freezable_batch_norm.FreezableBatchNorm)) def test_return_non_default_batch_norm_params_keras_override( self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } batch_norm { decay: 0.7 center: false scale: true epsilon: 0.03 } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) self.assertTrue(keras_config.use_batch_norm()) batch_norm_params = keras_config.batch_norm_params(momentum=0.4) self.assertAlmostEqual(batch_norm_params['momentum'], 0.4) self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) self.assertFalse(batch_norm_params['center']) self.assertTrue(batch_norm_params['scale']) def test_return_batch_norm_params_with_notrain_during_eval(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } batch_norm { decay: 0.7 center: false scale: true epsilon: 0.03 train: true } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=False) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) batch_norm_params = scope[_get_scope_key(slim.batch_norm)] self.assertAlmostEqual(batch_norm_params['decay'], 0.7) self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) self.assertFalse(batch_norm_params['center']) self.assertTrue(batch_norm_params['scale']) self.assertFalse(batch_norm_params['is_training']) def test_return_batch_norm_params_with_notrain_when_train_is_false(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } batch_norm { decay: 0.7 center: false scale: true epsilon: 0.03 train: false } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] self.assertEqual(conv_scope_arguments['normalizer_fn'], slim.batch_norm) batch_norm_params = scope[_get_scope_key(slim.batch_norm)] self.assertAlmostEqual(batch_norm_params['decay'], 0.7) self.assertAlmostEqual(batch_norm_params['epsilon'], 0.03) 
self.assertFalse(batch_norm_params['center']) self.assertTrue(batch_norm_params['scale']) self.assertFalse(batch_norm_params['is_training']) def test_do_not_use_batch_norm_if_default(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] self.assertEqual(conv_scope_arguments['normalizer_fn'], None) def test_do_not_use_batch_norm_if_default_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) self.assertFalse(keras_config.use_batch_norm()) self.assertEqual(keras_config.batch_norm_params(), {}) # The batch norm builder should build an identity Lambda layer identity_layer = keras_config.build_batch_norm() self.assertTrue(isinstance(identity_layer, tf.keras.layers.Lambda)) def test_use_none_activation(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } activation: NONE """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] self.assertEqual(conv_scope_arguments['activation_fn'], None) def test_use_none_activation_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } activation: NONE """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) self.assertEqual(keras_config.params()['activation'], None) self.assertEqual( keras_config.params(include_activation=True)['activation'], None) activation_layer = keras_config.build_activation_layer() self.assertTrue(isinstance(activation_layer, tf.keras.layers.Lambda)) self.assertEqual(activation_layer.function, tf.identity) def test_use_relu_activation(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } activation: RELU """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu) def test_use_relu_activation_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } activation: RELU """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) self.assertEqual(keras_config.params()['activation'], None) self.assertEqual( 
keras_config.params(include_activation=True)['activation'], tf.nn.relu) activation_layer = keras_config.build_activation_layer() self.assertTrue(isinstance(activation_layer, tf.keras.layers.Lambda)) self.assertEqual(activation_layer.function, tf.nn.relu) def test_use_relu_6_activation(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } activation: RELU_6 """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] self.assertEqual(conv_scope_arguments['activation_fn'], tf.nn.relu6) def test_use_relu_6_activation_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } activation: RELU_6 """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) self.assertEqual(keras_config.params()['activation'], None) self.assertEqual( keras_config.params(include_activation=True)['activation'], tf.nn.relu6) activation_layer = keras_config.build_activation_layer() self.assertTrue(isinstance(activation_layer, tf.keras.layers.Lambda)) self.assertEqual(activation_layer.function, tf.nn.relu6) def test_override_activation_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } activation: RELU_6 """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) new_params = keras_config.params(activation=tf.nn.relu) self.assertEqual(new_params['activation'], tf.nn.relu) def _assert_variance_in_range(self, initializer, shape, variance, tol=1e-2): with tf.Graph().as_default() as g: with self.test_session(graph=g) as sess: var = tf.get_variable( name='test', shape=shape, dtype=tf.float32, initializer=initializer) sess.run(tf.global_variables_initializer()) values = sess.run(var) self.assertAllClose(np.var(values), variance, tol, tol) def test_variance_in_range_with_variance_scaling_initializer_fan_in(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_IN uniform: false } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] initializer = conv_scope_arguments['weights_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=2. / 100.) 
def test_variance_in_range_with_variance_scaling_initializer_fan_in_keras( self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_IN uniform: false } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) initializer = keras_config.params()['kernel_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=2. / 100.) def test_variance_in_range_with_variance_scaling_initializer_fan_out(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_OUT uniform: false } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] initializer = conv_scope_arguments['weights_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=2. / 40.) def test_variance_in_range_with_variance_scaling_initializer_fan_out_keras( self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_OUT uniform: false } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) initializer = keras_config.params()['kernel_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=2. / 40.) def test_variance_in_range_with_variance_scaling_initializer_fan_avg(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_AVG uniform: false } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] initializer = conv_scope_arguments['weights_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=4. / (100. + 40.)) def test_variance_in_range_with_variance_scaling_initializer_fan_avg_keras( self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_AVG uniform: false } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) initializer = keras_config.params()['kernel_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=4. / (100. 
+ 40.)) def test_variance_in_range_with_variance_scaling_initializer_uniform(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_IN uniform: true } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] initializer = conv_scope_arguments['weights_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=2. / 100.) def test_variance_in_range_with_variance_scaling_initializer_uniform_keras( self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { variance_scaling_initializer { factor: 2.0 mode: FAN_IN uniform: true } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) initializer = keras_config.params()['kernel_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=2. / 100.) def test_variance_in_range_with_truncated_normal_initializer(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { mean: 0.0 stddev: 0.8 } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] initializer = conv_scope_arguments['weights_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=0.49, tol=1e-1) def test_variance_in_range_with_truncated_normal_initializer_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { mean: 0.0 stddev: 0.8 } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) initializer = keras_config.params()['kernel_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=0.49, tol=1e-1) def test_variance_in_range_with_random_normal_initializer(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { random_normal_initializer { mean: 0.0 stddev: 0.8 } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) scope_fn = hyperparams_builder.build(conv_hyperparams_proto, is_training=True) scope = scope_fn() conv_scope_arguments = scope[_get_scope_key(slim.conv2d)] initializer = conv_scope_arguments['weights_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=0.64, tol=1e-1) def test_variance_in_range_with_random_normal_initializer_keras(self): conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { random_normal_initializer { mean: 0.0 stddev: 0.8 } } """ conv_hyperparams_proto = hyperparams_pb2.Hyperparams() text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams_proto) keras_config = hyperparams_builder.KerasLayerHyperparams( conv_hyperparams_proto) initializer = 
keras_config.params()['kernel_initializer'] self._assert_variance_in_range(initializer, shape=[100, 40], variance=0.64, tol=1e-1) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/hyperparams_builder_test.py
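A condensed, untested sketch of the two build paths the tests above exercise; the regularizer weight and activation choice are arbitrary example values:

# Sketch of both hyperparams build paths covered by the tests above.
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2

proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  regularizer { l2_regularizer { weight: 0.0004 } }
  initializer { truncated_normal_initializer { } }
  activation: RELU_6
""", proto)

# Slim path: build() returns a no-argument function that creates the arg_scope.
scope_fn = hyperparams_builder.build(proto, is_training=True)
scope = scope_fn()

# Keras path: KerasLayerHyperparams exposes layer kwargs directly.
keras_config = hyperparams_builder.KerasLayerHyperparams(proto)
layer_params = keras_config.params()  # kernel_regularizer, kernel_initializer, ...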
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for losses_builder.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import losses_builder from object_detection.core import losses from object_detection.protos import losses_pb2 from object_detection.utils import ops class LocalizationLossBuilderTest(tf.test.TestCase): def test_build_weighted_l2_localization_loss(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(localization_loss, losses.WeightedL2LocalizationLoss)) def test_build_weighted_smooth_l1_localization_loss_default_delta(self): losses_text_proto = """ localization_loss { weighted_smooth_l1 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(localization_loss, losses.WeightedSmoothL1LocalizationLoss)) self.assertAlmostEqual(localization_loss._delta, 1.0) def test_build_weighted_smooth_l1_localization_loss_non_default_delta(self): losses_text_proto = """ localization_loss { weighted_smooth_l1 { delta: 0.1 } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(localization_loss, losses.WeightedSmoothL1LocalizationLoss)) self.assertAlmostEqual(localization_loss._delta, 0.1) def test_build_weighted_iou_localization_loss(self): losses_text_proto = """ localization_loss { weighted_iou { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(localization_loss, losses.WeightedIOULocalizationLoss)) def test_anchorwise_output(self): losses_text_proto = """ localization_loss { weighted_smooth_l1 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, localization_loss, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(localization_loss, losses.WeightedSmoothL1LocalizationLoss)) predictions = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) targets = tf.constant([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]]) weights = tf.constant([[1.0, 1.0]]) loss = localization_loss(predictions, targets, weights=weights) self.assertEqual(loss.shape, [1, 2]) def test_raise_error_on_empty_localization_config(self): 
losses_text_proto = """ classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) with self.assertRaises(ValueError): losses_builder._build_localization_loss(losses_proto) class ClassificationLossBuilderTest(tf.test.TestCase): def test_build_weighted_sigmoid_classification_loss(self): losses_text_proto = """ classification_loss { weighted_sigmoid { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(classification_loss, losses.WeightedSigmoidClassificationLoss)) def test_build_weighted_sigmoid_focal_classification_loss(self): losses_text_proto = """ classification_loss { weighted_sigmoid_focal { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(classification_loss, losses.SigmoidFocalClassificationLoss)) self.assertAlmostEqual(classification_loss._alpha, None) self.assertAlmostEqual(classification_loss._gamma, 2.0) def test_build_weighted_sigmoid_focal_loss_non_default(self): losses_text_proto = """ classification_loss { weighted_sigmoid_focal { alpha: 0.25 gamma: 3.0 } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(classification_loss, losses.SigmoidFocalClassificationLoss)) self.assertAlmostEqual(classification_loss._alpha, 0.25) self.assertAlmostEqual(classification_loss._gamma, 3.0) def test_build_weighted_softmax_classification_loss(self): losses_text_proto = """ classification_loss { weighted_softmax { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(classification_loss, losses.WeightedSoftmaxClassificationLoss)) def test_build_weighted_logits_softmax_classification_loss(self): losses_text_proto = """ classification_loss { weighted_logits_softmax { } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue( isinstance(classification_loss, losses.WeightedSoftmaxClassificationAgainstLogitsLoss)) def test_build_weighted_softmax_classification_loss_with_logit_scale(self): losses_text_proto = """ classification_loss { weighted_softmax { logit_scale: 2.0 } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(classification_loss, losses.WeightedSoftmaxClassificationLoss)) def test_build_bootstrapped_sigmoid_classification_loss(self): losses_text_proto = """ classification_loss { bootstrapped_sigmoid { alpha: 0.5 } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(classification_loss, 
losses.BootstrappedSigmoidClassificationLoss)) def test_anchorwise_output(self): losses_text_proto = """ classification_loss { weighted_sigmoid { anchorwise_output: true } } localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) classification_loss, _, _, _, _, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(classification_loss, losses.WeightedSigmoidClassificationLoss)) predictions = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.5, 0.5]]]) targets = tf.constant([[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]) weights = tf.constant([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]) loss = classification_loss(predictions, targets, weights=weights) self.assertEqual(loss.shape, [1, 2, 3]) def test_raise_error_on_empty_config(self): losses_text_proto = """ localization_loss { weighted_l2 { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) with self.assertRaises(ValueError): losses_builder.build(losses_proto) class HardExampleMinerBuilderTest(tf.test.TestCase): def test_do_not_build_hard_example_miner_by_default(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertEqual(hard_example_miner, None) def test_build_hard_example_miner_for_classification_loss(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { loss_type: CLASSIFICATION } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner)) self.assertEqual(hard_example_miner._loss_type, 'cls') def test_build_hard_example_miner_for_localization_loss(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { loss_type: LOCALIZATION } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner)) self.assertEqual(hard_example_miner._loss_type, 'loc') def test_build_hard_example_miner_with_non_default_values(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { num_hard_examples: 32 iou_threshold: 0.5 loss_type: LOCALIZATION max_negatives_per_positive: 10 min_negatives_per_image: 3 } """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) _, _, _, _, hard_example_miner, _, _ = losses_builder.build(losses_proto) self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner)) self.assertEqual(hard_example_miner._num_hard_examples, 32) self.assertAlmostEqual(hard_example_miner._iou_threshold, 0.5) self.assertEqual(hard_example_miner._max_negatives_per_positive, 10) self.assertEqual(hard_example_miner._min_negatives_per_image, 3) class LossBuilderTest(tf.test.TestCase): def test_build_all_loss_parameters(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto 
= losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, _, _) = losses_builder.build(losses_proto) self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner)) self.assertTrue(isinstance(classification_loss, losses.WeightedSoftmaxClassificationLoss)) self.assertTrue(isinstance(localization_loss, losses.WeightedL2LocalizationLoss)) self.assertAlmostEqual(classification_weight, 0.8) self.assertAlmostEqual(localization_weight, 0.2) def test_build_expected_sampling(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, _, _) = losses_builder.build(losses_proto) self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner)) self.assertTrue( isinstance(classification_loss, losses.WeightedSoftmaxClassificationLoss)) self.assertTrue( isinstance(localization_loss, losses.WeightedL2LocalizationLoss)) self.assertAlmostEqual(classification_weight, 0.8) self.assertAlmostEqual(localization_weight, 0.2) def test_build_reweighting_unmatched_anchors(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_softmax { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, _, _) = losses_builder.build(losses_proto) self.assertTrue(isinstance(hard_example_miner, losses.HardExampleMiner)) self.assertTrue( isinstance(classification_loss, losses.WeightedSoftmaxClassificationLoss)) self.assertTrue( isinstance(localization_loss, losses.WeightedL2LocalizationLoss)) self.assertAlmostEqual(classification_weight, 0.8) self.assertAlmostEqual(localization_weight, 0.2) def test_raise_error_when_both_focal_loss_and_hard_example_miner(self): losses_text_proto = """ localization_loss { weighted_l2 { } } classification_loss { weighted_sigmoid_focal { } } hard_example_miner { } classification_weight: 0.8 localization_weight: 0.2 """ losses_proto = losses_pb2.Loss() text_format.Merge(losses_text_proto, losses_proto) with self.assertRaises(ValueError): losses_builder.build(losses_proto) class FasterRcnnClassificationLossBuilderTest(tf.test.TestCase): def test_build_sigmoid_loss(self): losses_text_proto = """ weighted_sigmoid { } """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertTrue(isinstance(classification_loss, losses.WeightedSigmoidClassificationLoss)) def test_build_softmax_loss(self): losses_text_proto = """ weighted_softmax { } """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertTrue(isinstance(classification_loss, losses.WeightedSoftmaxClassificationLoss)) def test_build_logits_softmax_loss(self): losses_text_proto = """ weighted_logits_softmax { } """ losses_proto = losses_pb2.ClassificationLoss() 
text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertTrue( isinstance(classification_loss, losses.WeightedSoftmaxClassificationAgainstLogitsLoss)) def test_build_sigmoid_focal_loss(self): losses_text_proto = """ weighted_sigmoid_focal { } """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertTrue( isinstance(classification_loss, losses.SigmoidFocalClassificationLoss)) def test_build_softmax_loss_by_default(self): losses_text_proto = """ """ losses_proto = losses_pb2.ClassificationLoss() text_format.Merge(losses_text_proto, losses_proto) classification_loss = losses_builder.build_faster_rcnn_classification_loss( losses_proto) self.assertTrue(isinstance(classification_loss, losses.WeightedSoftmaxClassificationLoss)) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/losses_builder_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build localization and classification losses from config.""" import functools from object_detection.core import balanced_positive_negative_sampler as sampler from object_detection.core import losses from object_detection.protos import losses_pb2 from object_detection.utils import ops def build(loss_config): """Build losses based on the config. Builds classification, localization losses and optionally a hard example miner based on the config. Args: loss_config: A losses_pb2.Loss object. Returns: classification_loss: Classification loss object. localization_loss: Localization loss object. classification_weight: Classification loss weight. localization_weight: Localization loss weight. hard_example_miner: Hard example miner object. random_example_sampler: BalancedPositiveNegativeSampler object. Raises: ValueError: If hard_example_miner is used with sigmoid_focal_loss. ValueError: If random_example_sampler is getting non-positive value as desired positive example fraction. """ classification_loss = _build_classification_loss( loss_config.classification_loss) localization_loss = _build_localization_loss( loss_config.localization_loss) classification_weight = loss_config.classification_weight localization_weight = loss_config.localization_weight hard_example_miner = None if loss_config.HasField('hard_example_miner'): if (loss_config.classification_loss.WhichOneof('classification_loss') == 'weighted_sigmoid_focal'): raise ValueError('HardExampleMiner should not be used with sigmoid focal ' 'loss') hard_example_miner = build_hard_example_miner( loss_config.hard_example_miner, classification_weight, localization_weight) random_example_sampler = None if loss_config.HasField('random_example_sampler'): if loss_config.random_example_sampler.positive_sample_fraction <= 0: raise ValueError('RandomExampleSampler should not use non-positive' 'value as positive sample fraction.') random_example_sampler = sampler.BalancedPositiveNegativeSampler( positive_fraction=loss_config.random_example_sampler. 
positive_sample_fraction) if loss_config.expected_loss_weights == loss_config.NONE: expected_loss_weights_fn = None elif loss_config.expected_loss_weights == loss_config.EXPECTED_SAMPLING: expected_loss_weights_fn = functools.partial( ops.expected_classification_loss_by_expected_sampling, min_num_negative_samples=loss_config.min_num_negative_samples, desired_negative_sampling_ratio=loss_config .desired_negative_sampling_ratio) elif (loss_config.expected_loss_weights == loss_config .REWEIGHTING_UNMATCHED_ANCHORS): expected_loss_weights_fn = functools.partial( ops.expected_classification_loss_by_reweighting_unmatched_anchors, min_num_negative_samples=loss_config.min_num_negative_samples, desired_negative_sampling_ratio=loss_config .desired_negative_sampling_ratio) else: raise ValueError('Not a valid value for expected_classification_loss.') return (classification_loss, localization_loss, classification_weight, localization_weight, hard_example_miner, random_example_sampler, expected_loss_weights_fn) def build_hard_example_miner(config, classification_weight, localization_weight): """Builds hard example miner based on the config. Args: config: A losses_pb2.HardExampleMiner object. classification_weight: Classification loss weight. localization_weight: Localization loss weight. Returns: Hard example miner. """ loss_type = None if config.loss_type == losses_pb2.HardExampleMiner.BOTH: loss_type = 'both' if config.loss_type == losses_pb2.HardExampleMiner.CLASSIFICATION: loss_type = 'cls' if config.loss_type == losses_pb2.HardExampleMiner.LOCALIZATION: loss_type = 'loc' max_negatives_per_positive = None num_hard_examples = None if config.max_negatives_per_positive > 0: max_negatives_per_positive = config.max_negatives_per_positive if config.num_hard_examples > 0: num_hard_examples = config.num_hard_examples hard_example_miner = losses.HardExampleMiner( num_hard_examples=num_hard_examples, iou_threshold=config.iou_threshold, loss_type=loss_type, cls_loss_weight=classification_weight, loc_loss_weight=localization_weight, max_negatives_per_positive=max_negatives_per_positive, min_negatives_per_image=config.min_negatives_per_image) return hard_example_miner def build_faster_rcnn_classification_loss(loss_config): """Builds a classification loss for Faster RCNN based on the loss config. Args: loss_config: A losses_pb2.ClassificationLoss object. Returns: Loss based on the config. Raises: ValueError: On invalid loss_config. """ if not isinstance(loss_config, losses_pb2.ClassificationLoss): raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') loss_type = loss_config.WhichOneof('classification_loss') if loss_type == 'weighted_sigmoid': return losses.WeightedSigmoidClassificationLoss() if loss_type == 'weighted_softmax': config = loss_config.weighted_softmax return losses.WeightedSoftmaxClassificationLoss( logit_scale=config.logit_scale) if loss_type == 'weighted_logits_softmax': config = loss_config.weighted_logits_softmax return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( logit_scale=config.logit_scale) if loss_type == 'weighted_sigmoid_focal': config = loss_config.weighted_sigmoid_focal alpha = None if config.HasField('alpha'): alpha = config.alpha return losses.SigmoidFocalClassificationLoss( gamma=config.gamma, alpha=alpha) # By default, Faster RCNN second stage classifier uses Softmax loss # with anchor-wise outputs. 
config = loss_config.weighted_softmax return losses.WeightedSoftmaxClassificationLoss( logit_scale=config.logit_scale) def _build_localization_loss(loss_config): """Builds a localization loss based on the loss config. Args: loss_config: A losses_pb2.LocalizationLoss object. Returns: Loss based on the config. Raises: ValueError: On invalid loss_config. """ if not isinstance(loss_config, losses_pb2.LocalizationLoss): raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.') loss_type = loss_config.WhichOneof('localization_loss') if loss_type == 'weighted_l2': return losses.WeightedL2LocalizationLoss() if loss_type == 'weighted_smooth_l1': return losses.WeightedSmoothL1LocalizationLoss( loss_config.weighted_smooth_l1.delta) if loss_type == 'weighted_iou': return losses.WeightedIOULocalizationLoss() raise ValueError('Empty loss config.') def _build_classification_loss(loss_config): """Builds a classification loss based on the loss config. Args: loss_config: A losses_pb2.ClassificationLoss object. Returns: Loss based on the config. Raises: ValueError: On invalid loss_config. """ if not isinstance(loss_config, losses_pb2.ClassificationLoss): raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.') loss_type = loss_config.WhichOneof('classification_loss') if loss_type == 'weighted_sigmoid': return losses.WeightedSigmoidClassificationLoss() if loss_type == 'weighted_sigmoid_focal': config = loss_config.weighted_sigmoid_focal alpha = None if config.HasField('alpha'): alpha = config.alpha return losses.SigmoidFocalClassificationLoss( gamma=config.gamma, alpha=alpha) if loss_type == 'weighted_softmax': config = loss_config.weighted_softmax return losses.WeightedSoftmaxClassificationLoss( logit_scale=config.logit_scale) if loss_type == 'weighted_logits_softmax': config = loss_config.weighted_logits_softmax return losses.WeightedSoftmaxClassificationAgainstLogitsLoss( logit_scale=config.logit_scale) if loss_type == 'bootstrapped_sigmoid': config = loss_config.bootstrapped_sigmoid return losses.BootstrappedSigmoidClassificationLoss( alpha=config.alpha, bootstrap_type=('hard' if config.hard_bootstrap else 'soft')) raise ValueError('Empty loss config.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/losses_builder.py
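A minimal usage sketch for the losses builder above (not part of the repository; it assumes the object_detection package and its compiled protos are importable, mirroring the patterns in losses_builder_test.py): parse a Loss text proto and unpack the seven values that build() returns.

from google.protobuf import text_format
from object_detection.builders import losses_builder
from object_detection.protos import losses_pb2

# Configure a smooth-L1 localization loss and a sigmoid classification loss.
losses_text_proto = """
  localization_loss { weighted_smooth_l1 { } }
  classification_loss { weighted_sigmoid { } }
  classification_weight: 1.0
  localization_weight: 1.0
"""
losses_proto = losses_pb2.Loss()
text_format.Merge(losses_text_proto, losses_proto)
(classification_loss, localization_loss, classification_weight,
 localization_weight, hard_example_miner, random_example_sampler,
 expected_loss_weights_fn) = losses_builder.build(losses_proto)
# With this config the optional components all come back as None.
assert hard_example_miner is None and random_example_sampler is None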
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A function to build an object detection box coder from configuration.""" from object_detection.box_coders import faster_rcnn_box_coder from object_detection.box_coders import keypoint_box_coder from object_detection.box_coders import mean_stddev_box_coder from object_detection.box_coders import square_box_coder from object_detection.protos import box_coder_pb2 def build(box_coder_config): """Builds a box coder object based on the box coder config. Args: box_coder_config: A box_coder.proto object containing the config for the desired box coder. Returns: BoxCoder based on the config. Raises: ValueError: On empty box coder proto. """ if not isinstance(box_coder_config, box_coder_pb2.BoxCoder): raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.') if box_coder_config.WhichOneof('box_coder_oneof') == 'faster_rcnn_box_coder': return faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=[ box_coder_config.faster_rcnn_box_coder.y_scale, box_coder_config.faster_rcnn_box_coder.x_scale, box_coder_config.faster_rcnn_box_coder.height_scale, box_coder_config.faster_rcnn_box_coder.width_scale ]) if box_coder_config.WhichOneof('box_coder_oneof') == 'keypoint_box_coder': return keypoint_box_coder.KeypointBoxCoder( box_coder_config.keypoint_box_coder.num_keypoints, scale_factors=[ box_coder_config.keypoint_box_coder.y_scale, box_coder_config.keypoint_box_coder.x_scale, box_coder_config.keypoint_box_coder.height_scale, box_coder_config.keypoint_box_coder.width_scale ]) if (box_coder_config.WhichOneof('box_coder_oneof') == 'mean_stddev_box_coder'): return mean_stddev_box_coder.MeanStddevBoxCoder( stddev=box_coder_config.mean_stddev_box_coder.stddev) if box_coder_config.WhichOneof('box_coder_oneof') == 'square_box_coder': return square_box_coder.SquareBoxCoder(scale_factors=[ box_coder_config.square_box_coder.y_scale, box_coder_config.square_box_coder.x_scale, box_coder_config.square_box_coder.length_scale ]) raise ValueError('Empty box coder.')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/builders/box_coder_builder.py
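As a hedged illustration of the box coder builder above (the scale factors here are just the conventional SSD/Faster R-CNN defaults, not something the builder mandates):

from google.protobuf import text_format
from object_detection.builders import box_coder_builder
from object_detection.protos import box_coder_pb2

box_coder_text_proto = """
  faster_rcnn_box_coder {
    y_scale: 10.0
    x_scale: 10.0
    height_scale: 5.0
    width_scale: 5.0
  }
"""
box_coder_proto = box_coder_pb2.BoxCoder()
text_format.Merge(box_coder_text_proto, box_coder_proto)
# Returns a faster_rcnn_box_coder.FasterRcnnBoxCoder built with the
# scale factors [10.0, 10.0, 5.0, 5.0].
box_coder = box_coder_builder.build(box_coder_proto)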
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.trainer.""" import tensorflow as tf from google.protobuf import text_format from object_detection.core import losses from object_detection.core import model from object_detection.core import standard_fields as fields from object_detection.legacy import trainer from object_detection.protos import train_pb2 NUMBER_OF_CLASSES = 2 def get_input_function(): """A function to get test inputs. Returns an image with one box.""" image = tf.random_uniform([32, 32, 3], dtype=tf.float32) key = tf.constant('image_000000') class_label = tf.random_uniform( [1], minval=0, maxval=NUMBER_OF_CLASSES, dtype=tf.int32) box_label = tf.random_uniform( [1, 4], minval=0.4, maxval=0.6, dtype=tf.float32) multiclass_scores = tf.random_uniform( [1, NUMBER_OF_CLASSES], minval=0.4, maxval=0.6, dtype=tf.float32) return { fields.InputDataFields.image: image, fields.InputDataFields.key: key, fields.InputDataFields.groundtruth_classes: class_label, fields.InputDataFields.groundtruth_boxes: box_label, fields.InputDataFields.multiclass_scores: multiclass_scores } class FakeDetectionModel(model.DetectionModel): """A simple (and poor) DetectionModel for use in test.""" def __init__(self): super(FakeDetectionModel, self).__init__(num_classes=NUMBER_OF_CLASSES) self._classification_loss = losses.WeightedSigmoidClassificationLoss() self._localization_loss = losses.WeightedSmoothL1LocalizationLoss() def preprocess(self, inputs): """Input preprocessing, resizes images to 28x28. Args: inputs: a [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. """ true_image_shapes = [inputs.shape[:-1].as_list() for _ in range(inputs.shape[-1])] return tf.image.resize_images(inputs, [28, 28]), true_image_shapes def predict(self, preprocessed_inputs, true_image_shapes): """Prediction tensors from inputs tensor. Args: preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding prediction tensors to be passed to the Loss or Postprocess functions. 
""" flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs) class_prediction = tf.contrib.layers.fully_connected( flattened_inputs, self._num_classes) box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4) return { 'class_predictions_with_background': tf.reshape( class_prediction, [-1, 1, self._num_classes]), 'box_encodings': tf.reshape(box_prediction, [-1, 1, 4]) } def postprocess(self, prediction_dict, true_image_shapes, **params): """Convert predicted output tensors to final detections. Unused. Args: prediction_dict: a dictionary holding prediction tensors. true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. **params: Additional keyword arguments for specific implementations of DetectionModel. Returns: detections: a dictionary with empty fields. """ return { 'detection_boxes': None, 'detection_scores': None, 'detection_classes': None, 'num_detections': None } def loss(self, prediction_dict, true_image_shapes): """Compute scalar loss tensors with respect to provided groundtruth. Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function. Args: prediction_dict: a dictionary holding predicted tensors true_image_shapes: int32 tensor of shape [batch, 3] where each row is of the form [height, width, channels] indicating the shapes of true images in the resized images, as resized images can be padded with zeros. Returns: a dictionary mapping strings (loss names) to scalar tensors representing loss values. """ batch_reg_targets = tf.stack( self.groundtruth_lists(fields.BoxListFields.boxes)) batch_cls_targets = tf.stack( self.groundtruth_lists(fields.BoxListFields.classes)) weights = tf.constant( 1.0, dtype=tf.float32, shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1]) location_losses = self._localization_loss( prediction_dict['box_encodings'], batch_reg_targets, weights=weights) cls_losses = self._classification_loss( prediction_dict['class_predictions_with_background'], batch_cls_targets, weights=weights) loss_dict = { 'localization_loss': tf.reduce_sum(location_losses), 'classification_loss': tf.reduce_sum(cls_losses), } return loss_dict def regularization_losses(self): """Returns a list of regularization losses for this model. Returns a list of regularization losses for this model that the estimator needs to use during training/optimization. Returns: A list of regularization loss tensors. """ pass def restore_map(self, fine_tune_checkpoint_type='detection'): """Returns a map of variables to load from a foreign checkpoint. Args: fine_tune_checkpoint_type: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Valid values: `detection`, `classification`. Default 'detection'. Returns: A dict mapping variable names to variables. """ return {var.op.name: var for var in tf.global_variables()} def updates(self): """Returns a list of update operators for this model. Returns a list of update operators for this model that must be executed at each training step. The estimator's train op needs to have a control dependency on these updates. Returns: A list of update operators. 
""" pass class TrainerTest(tf.test.TestCase): def test_configure_trainer_and_train_two_steps(self): train_config_text_proto = """ optimizer { adam_optimizer { learning_rate { constant_learning_rate { learning_rate: 0.01 } } } } data_augmentation_options { random_adjust_brightness { max_delta: 0.2 } } data_augmentation_options { random_adjust_contrast { min_delta: 0.7 max_delta: 1.1 } } num_steps: 2 """ train_config = train_pb2.TrainConfig() text_format.Merge(train_config_text_proto, train_config) train_dir = self.get_temp_dir() trainer.train( create_tensor_dict_fn=get_input_function, create_model_fn=FakeDetectionModel, train_config=train_config, master='', task=0, num_clones=1, worker_replicas=1, clone_on_cpu=True, ps_tasks=0, worker_job_name='worker', is_chief=True, train_dir=train_dir) def test_configure_trainer_with_multiclass_scores_and_train_two_steps(self): train_config_text_proto = """ optimizer { adam_optimizer { learning_rate { constant_learning_rate { learning_rate: 0.01 } } } } data_augmentation_options { random_adjust_brightness { max_delta: 0.2 } } data_augmentation_options { random_adjust_contrast { min_delta: 0.7 max_delta: 1.1 } } num_steps: 2 use_multiclass_scores: true """ train_config = train_pb2.TrainConfig() text_format.Merge(train_config_text_proto, train_config) train_dir = self.get_temp_dir() trainer.train(create_tensor_dict_fn=get_input_function, create_model_fn=FakeDetectionModel, train_config=train_config, master='', task=0, num_clones=1, worker_replicas=1, clone_on_cpu=True, ps_tasks=0, worker_job_name='worker', is_chief=True, train_dir=train_dir) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/legacy/trainer_test.py
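FakeDetectionModel above also documents the DetectionModel call sequence the trainer relies on. A standalone sketch of that sequence (illustrative, not from the repository; it assumes it runs inside the test module so FakeDetectionModel and NUMBER_OF_CLASSES are in scope, and the groundtruth values are made up):

import tensorflow as tf

model = FakeDetectionModel()
images = tf.random_uniform([2, 32, 32, 3], dtype=tf.float32)
preprocessed_inputs, true_image_shapes = model.preprocess(images)
# Groundtruth must be provided before loss() can be called.
model.provide_groundtruth(
    groundtruth_boxes_list=[tf.constant([[0.4, 0.4, 0.6, 0.6]])] * 2,
    groundtruth_classes_list=[tf.one_hot([0], NUMBER_OF_CLASSES)] * 2)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
# Builds scalar loss tensors; evaluating them still requires a session.
loss_dict = model.loss(prediction_dict, true_image_shapes)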
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/legacy/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Training executable for detection models. This executable is used to train DetectionModels. There are two ways of configuring the training job: 1) A single pipeline_pb2.TrainEvalPipelineConfig configuration file can be specified by --pipeline_config_path. Example usage: ./train \ --logtostderr \ --train_dir=path/to/train_dir \ --pipeline_config_path=pipeline_config.pbtxt 2) Three configuration files can be provided: a model_pb2.DetectionModel configuration file to define what type of DetectionModel is being trained, an input_reader_pb2.InputReader file to specify what training data will be used and a train_pb2.TrainConfig file to configure training parameters. Example usage: ./train \ --logtostderr \ --train_dir=path/to/train_dir \ --model_config_path=model_config.pbtxt \ --train_config_path=train_config.pbtxt \ --input_config_path=train_input_config.pbtxt """ import functools import json import os import tensorflow as tf from object_detection.builders import dataset_builder from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.legacy import trainer from object_detection.utils import config_util tf.logging.set_verbosity(tf.logging.INFO) flags = tf.app.flags flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.') flags.DEFINE_integer('task', 0, 'task id') flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy per worker.') flags.DEFINE_boolean('clone_on_cpu', False, 'Force clones to be deployed on CPU. Note that even if ' 'set to False (allowing ops to run on gpu), some ops may ' 'still be run on the CPU if they have no GPU kernel.') flags.DEFINE_integer('worker_replicas', 1, 'Number of worker+trainer ' 'replicas.') flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter server tasks. If None, does not use ' 'a parameter server.') flags.DEFINE_string('train_dir', '', 'Directory to save the checkpoints and training summaries.') flags.DEFINE_string('pipeline_config_path', '', 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file. If provided, other configs are ignored') flags.DEFINE_string('train_config_path', '', 'Path to a train_pb2.TrainConfig config file.') flags.DEFINE_string('input_config_path', '', 'Path to an input_reader_pb2.InputReader config file.') flags.DEFINE_string('model_config_path', '', 'Path to a model_pb2.DetectionModel config file.') FLAGS = flags.FLAGS @tf.contrib.framework.deprecated(None, 'Use object_detection/model_main.py.') def main(_): assert FLAGS.train_dir, '`train_dir` is missing.' 
if FLAGS.task == 0: tf.gfile.MakeDirs(FLAGS.train_dir) if FLAGS.pipeline_config_path: configs = config_util.get_configs_from_pipeline_file( FLAGS.pipeline_config_path) if FLAGS.task == 0: tf.gfile.Copy(FLAGS.pipeline_config_path, os.path.join(FLAGS.train_dir, 'pipeline.config'), overwrite=True) else: configs = config_util.get_configs_from_multiple_files( model_config_path=FLAGS.model_config_path, train_config_path=FLAGS.train_config_path, train_input_config_path=FLAGS.input_config_path) if FLAGS.task == 0: for name, config in [('model.config', FLAGS.model_config_path), ('train.config', FLAGS.train_config_path), ('input.config', FLAGS.input_config_path)]: tf.gfile.Copy(config, os.path.join(FLAGS.train_dir, name), overwrite=True) model_config = configs['model'] train_config = configs['train_config'] input_config = configs['train_input_config'] model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=True) def get_next(config): return dataset_builder.make_initializable_iterator( dataset_builder.build(config)).get_next() create_input_dict_fn = functools.partial(get_next, input_config) env = json.loads(os.environ.get('TF_CONFIG', '{}')) cluster_data = env.get('cluster', None) cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None task_data = env.get('task', None) or {'type': 'master', 'index': 0} task_info = type('TaskSpec', (object,), task_data) # Parameters for a single worker. ps_tasks = 0 worker_replicas = 1 worker_job_name = 'lonely_worker' task = 0 is_chief = True master = '' if cluster_data and 'worker' in cluster_data: # Number of total worker replicas include "worker"s and the "master". worker_replicas = len(cluster_data['worker']) + 1 if cluster_data and 'ps' in cluster_data: ps_tasks = len(cluster_data['ps']) if worker_replicas > 1 and ps_tasks < 1: raise ValueError('At least 1 ps task is needed for distributed training.') if worker_replicas >= 1 and ps_tasks > 0: # Set up distributed training. server = tf.train.Server(tf.train.ClusterSpec(cluster), protocol='grpc', job_name=task_info.type, task_index=task_info.index) if task_info.type == 'ps': server.join() return worker_job_name = '%s/task:%d' % (task_info.type, task_info.index) task = task_info.index is_chief = (task_info.type == 'master') master = server.target graph_rewriter_fn = None if 'graph_rewriter_config' in configs: graph_rewriter_fn = graph_rewriter_builder.build( configs['graph_rewriter_config'], is_training=True) trainer.train( create_input_dict_fn, model_fn, train_config, master, task, FLAGS.num_clones, worker_replicas, FLAGS.clone_on_cpu, ps_tasks, worker_job_name, is_chief, FLAGS.train_dir, graph_hook_fn=graph_rewriter_fn) if __name__ == '__main__': tf.app.run()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/legacy/train.py
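For distributed runs, main() above derives this process's role from the TF_CONFIG environment variable. A sketch of the layout it expects (hostnames, ports, and task assignment here are made up):

import json
import os

# One master, two workers, one parameter server; this process is worker 0.
os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'master': ['host0:2222'],
        'worker': ['host1:2222', 'host2:2222'],
        'ps': ['host3:2222'],
    },
    'task': {'type': 'worker', 'index': 0},
})
# main() then computes worker_replicas = len(workers) + 1 (the master also
# counts as a worker), ps_tasks = len(ps), and starts a tf.train.Server for
# this task; 'ps' tasks simply call server.join().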
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Detection model trainer. This file provides a generic training method that can be used to train a DetectionModel. """ import functools import tensorflow as tf from object_detection.builders import optimizer_builder from object_detection.builders import preprocessor_builder from object_detection.core import batcher from object_detection.core import preprocessor from object_detection.core import standard_fields as fields from object_detection.utils import ops as util_ops from object_detection.utils import variables_helper from deployment import model_deploy slim = tf.contrib.slim def create_input_queue(batch_size_per_clone, create_tensor_dict_fn, batch_queue_capacity, num_batch_queue_threads, prefetch_queue_capacity, data_augmentation_options): """Sets up reader, prefetcher and returns input queue. Args: batch_size_per_clone: batch size to use per clone. create_tensor_dict_fn: function to create tensor dictionary. batch_queue_capacity: maximum number of elements to store within a queue. num_batch_queue_threads: number of threads to use for batching. prefetch_queue_capacity: maximum capacity of the queue used to prefetch assembled batches. data_augmentation_options: a list of tuples, where each tuple contains a data augmentation function and a dictionary containing arguments and their values (see preprocessor.py). Returns: input queue: a batcher.BatchQueue object holding enqueued tensor_dicts (which hold images, boxes and targets). To get a batch of tensor_dicts, call input_queue.Dequeue(). """ tensor_dict = create_tensor_dict_fn() tensor_dict[fields.InputDataFields.image] = tf.expand_dims( tensor_dict[fields.InputDataFields.image], 0) images = tensor_dict[fields.InputDataFields.image] float_images = tf.to_float(images) tensor_dict[fields.InputDataFields.image] = float_images include_instance_masks = (fields.InputDataFields.groundtruth_instance_masks in tensor_dict) include_keypoints = (fields.InputDataFields.groundtruth_keypoints in tensor_dict) include_multiclass_scores = (fields.InputDataFields.multiclass_scores in tensor_dict) if data_augmentation_options: tensor_dict = preprocessor.preprocess( tensor_dict, data_augmentation_options, func_arg_map=preprocessor.get_default_func_arg_map( include_label_weights=True, include_multiclass_scores=include_multiclass_scores, include_instance_masks=include_instance_masks, include_keypoints=include_keypoints)) input_queue = batcher.BatchQueue( tensor_dict, batch_size=batch_size_per_clone, batch_queue_capacity=batch_queue_capacity, num_batch_queue_threads=num_batch_queue_threads, prefetch_queue_capacity=prefetch_queue_capacity) return input_queue def get_inputs(input_queue, num_classes, merge_multiple_label_boxes=False, use_multiclass_scores=False): """Dequeues batch and constructs inputs to object detection model. 
Args: input_queue: BatchQueue object holding enqueued tensor_dicts. num_classes: Number of classes. merge_multiple_label_boxes: Whether to merge boxes with multiple labels or not. Defaults to false. Merged boxes are represented with a single box and a k-hot encoding of the multiple labels associated with the boxes. use_multiclass_scores: Whether to use multiclass scores instead of groundtruth_classes. Returns: images: a list of 3-D float tensor of images. image_keys: a list of string keys for the images. locations_list: a list of tensors of shape [num_boxes, 4] containing the corners of the groundtruth boxes. classes_list: a list of padded one-hot (or K-hot) float32 tensors containing target classes. masks_list: a list of 3-D float tensors of shape [num_boxes, image_height, image_width] containing instance masks for objects if present in the input_queue. Else returns None. keypoints_list: a list of 3-D float tensors of shape [num_boxes, num_keypoints, 2] containing keypoints for objects if present in the input queue. Else returns None. weights_lists: a list of 1-D float32 tensors of shape [num_boxes] containing groundtruth weight for each box. """ read_data_list = input_queue.dequeue() label_id_offset = 1 def extract_images_and_targets(read_data): """Extract images and targets from the input dict.""" image = read_data[fields.InputDataFields.image] key = '' if fields.InputDataFields.source_id in read_data: key = read_data[fields.InputDataFields.source_id] location_gt = read_data[fields.InputDataFields.groundtruth_boxes] classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes], tf.int32) classes_gt -= label_id_offset if merge_multiple_label_boxes and use_multiclass_scores: raise ValueError( 'Using both merge_multiple_label_boxes and use_multiclass_scores is' 'not supported' ) if merge_multiple_label_boxes: location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels( location_gt, classes_gt, num_classes) classes_gt = tf.cast(classes_gt, tf.float32) elif use_multiclass_scores: classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores], tf.float32) else: classes_gt = util_ops.padded_one_hot_encoding( indices=classes_gt, depth=num_classes, left_pad=0) masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks) keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints) if (merge_multiple_label_boxes and ( masks_gt is not None or keypoints_gt is not None)): raise NotImplementedError('Multi-label support is only for boxes.') weights_gt = read_data.get( fields.InputDataFields.groundtruth_weights) return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt, weights_gt) return zip(*map(extract_images_and_targets, read_data_list)) def _create_losses(input_queue, create_model_fn, train_config): """Creates loss function for a DetectionModel. Args: input_queue: BatchQueue object holding enqueued tensor_dicts. create_model_fn: A function to create the DetectionModel. train_config: a train_pb2.TrainConfig protobuf. 
""" detection_model = create_model_fn() (images, _, groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, groundtruth_keypoints_list, groundtruth_weights_list) = get_inputs( input_queue, detection_model.num_classes, train_config.merge_multiple_label_boxes, train_config.use_multiclass_scores) preprocessed_images = [] true_image_shapes = [] for image in images: resized_image, true_image_shape = detection_model.preprocess(image) preprocessed_images.append(resized_image) true_image_shapes.append(true_image_shape) images = tf.concat(preprocessed_images, 0) true_image_shapes = tf.concat(true_image_shapes, 0) if any(mask is None for mask in groundtruth_masks_list): groundtruth_masks_list = None if any(keypoints is None for keypoints in groundtruth_keypoints_list): groundtruth_keypoints_list = None detection_model.provide_groundtruth( groundtruth_boxes_list, groundtruth_classes_list, groundtruth_masks_list, groundtruth_keypoints_list, groundtruth_weights_list=groundtruth_weights_list) prediction_dict = detection_model.predict(images, true_image_shapes) losses_dict = detection_model.loss(prediction_dict, true_image_shapes) for loss_tensor in losses_dict.values(): tf.losses.add_loss(loss_tensor) def train(create_tensor_dict_fn, create_model_fn, train_config, master, task, num_clones, worker_replicas, clone_on_cpu, ps_tasks, worker_job_name, is_chief, train_dir, graph_hook_fn=None): """Training function for detection models. Args: create_tensor_dict_fn: a function to create a tensor input dictionary. create_model_fn: a function that creates a DetectionModel and generates losses. train_config: a train_pb2.TrainConfig protobuf. master: BNS name of the TensorFlow master to use. task: The task id of this training instance. num_clones: The number of clones to run per machine. worker_replicas: The number of work replicas to train with. clone_on_cpu: True if clones should be forced to run on CPU. ps_tasks: Number of parameter server tasks. worker_job_name: Name of the worker job. is_chief: Whether this replica is the chief replica. train_dir: Directory to write checkpoints and training summaries to. graph_hook_fn: Optional function that is called after the inference graph is built (before optimization). This is helpful to perform additional changes to the training graph such as adding FakeQuant ops. The function should modify the default graph. Raises: ValueError: If both num_clones > 1 and train_config.sync_replicas is true. """ detection_model = create_model_fn() data_augmentation_options = [ preprocessor_builder.build(step) for step in train_config.data_augmentation_options] with tf.Graph().as_default(): # Build a configuration specifying multi-GPU and multi-replicas. deploy_config = model_deploy.DeploymentConfig( num_clones=num_clones, clone_on_cpu=clone_on_cpu, replica_id=task, num_replicas=worker_replicas, num_ps_tasks=ps_tasks, worker_job_name=worker_job_name) # Place the global step on the device storing the variables. with tf.device(deploy_config.variables_device()): global_step = slim.create_global_step() if num_clones != 1 and train_config.sync_replicas: raise ValueError('In Synchronous SGD mode num_clones must ', 'be 1. 
Found num_clones: {}'.format(num_clones)) batch_size = train_config.batch_size // num_clones if train_config.sync_replicas: batch_size //= train_config.replicas_to_aggregate with tf.device(deploy_config.inputs_device()): input_queue = create_input_queue( batch_size, create_tensor_dict_fn, train_config.batch_queue_capacity, train_config.num_batch_queue_threads, train_config.prefetch_queue_capacity, data_augmentation_options) # Gather initial summaries. # TODO(rathodv): See if summaries can be added/extracted from global tf # collections so that they don't have to be passed around. summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES)) global_summaries = set([]) model_fn = functools.partial(_create_losses, create_model_fn=create_model_fn, train_config=train_config) clones = model_deploy.create_clones(deploy_config, model_fn, [input_queue]) first_clone_scope = clones[0].scope if graph_hook_fn: with tf.device(deploy_config.variables_device()): graph_hook_fn() # Gather update_ops from the first clone. These contain, for example, # the updates for the batch_norm variables created by model_fn. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope) with tf.device(deploy_config.optimizer_device()): training_optimizer, optimizer_summary_vars = optimizer_builder.build( train_config.optimizer) for var in optimizer_summary_vars: tf.summary.scalar(var.op.name, var, family='LearningRate') sync_optimizer = None if train_config.sync_replicas: training_optimizer = tf.train.SyncReplicasOptimizer( training_optimizer, replicas_to_aggregate=train_config.replicas_to_aggregate, total_num_replicas=worker_replicas) sync_optimizer = training_optimizer with tf.device(deploy_config.optimizer_device()): regularization_losses = (None if train_config.add_regularization_loss else []) total_loss, grads_and_vars = model_deploy.optimize_clones( clones, training_optimizer, regularization_losses=regularization_losses) total_loss = tf.check_numerics(total_loss, 'LossTensor is inf or nan.') # Optionally multiply bias gradients by train_config.bias_grad_multiplier. if train_config.bias_grad_multiplier: biases_regex_list = ['.*/biases'] grads_and_vars = variables_helper.multiply_gradients_matching_regex( grads_and_vars, biases_regex_list, multiplier=train_config.bias_grad_multiplier) # Optionally freeze some layers by setting their gradients to be zero. if train_config.freeze_variables: grads_and_vars = variables_helper.freeze_gradients_matching_regex( grads_and_vars, train_config.freeze_variables) # Optionally clip gradients if train_config.gradient_clipping_by_norm > 0: with tf.name_scope('clip_grads'): grads_and_vars = slim.learning.clip_gradient_norms( grads_and_vars, train_config.gradient_clipping_by_norm) # Create gradient updates. grad_updates = training_optimizer.apply_gradients(grads_and_vars, global_step=global_step) update_ops.append(grad_updates) update_op = tf.group(*update_ops, name='update_barrier') with tf.control_dependencies([update_op]): train_tensor = tf.identity(total_loss, name='train_op') # Add summaries. for model_var in slim.get_model_variables(): global_summaries.add(tf.summary.histogram('ModelVars/' + model_var.op.name, model_var)) for loss_tensor in tf.losses.get_losses(): global_summaries.add(tf.summary.scalar('Losses/' + loss_tensor.op.name, loss_tensor)) global_summaries.add( tf.summary.scalar('Losses/TotalLoss', tf.losses.get_total_loss())) # Add the summaries from the first clone. 
These contain the summaries # created by model_fn and either optimize_clones() or _gather_clone_loss(). summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope)) summaries |= global_summaries # Merge all summaries together. summary_op = tf.summary.merge(list(summaries), name='summary_op') # Soft placement allows placing on CPU ops without GPU implementation. session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) # Save checkpoints regularly. keep_checkpoint_every_n_hours = train_config.keep_checkpoint_every_n_hours saver = tf.train.Saver( keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours) # Create ops required to initialize the model from a given checkpoint. init_fn = None if train_config.fine_tune_checkpoint: if not train_config.fine_tune_checkpoint_type: # train_config.from_detection_checkpoint field is deprecated. For # backward compatibility, fine_tune_checkpoint_type is set based on # from_detection_checkpoint. if train_config.from_detection_checkpoint: train_config.fine_tune_checkpoint_type = 'detection' else: train_config.fine_tune_checkpoint_type = 'classification' var_map = detection_model.restore_map( fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type, load_all_detection_checkpoint_vars=( train_config.load_all_detection_checkpoint_vars)) available_var_map = (variables_helper. get_variables_available_in_checkpoint( var_map, train_config.fine_tune_checkpoint, include_global_step=False)) init_saver = tf.train.Saver(available_var_map) def initializer_fn(sess): init_saver.restore(sess, train_config.fine_tune_checkpoint) init_fn = initializer_fn slim.learning.train( train_tensor, logdir=train_dir, master=master, is_chief=is_chief, session_config=session_config, startup_delay_steps=train_config.startup_delay_steps, init_fn=init_fn, summary_op=summary_op, number_of_steps=( train_config.num_steps if train_config.num_steps else None), save_summaries_secs=120, sync_optimizer=sync_optimizer, saver=saver)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/legacy/trainer.py
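The batch-size arithmetic near the top of train() above is easy to misread; a worked example with illustrative numbers (recall that num_clones must be 1 whenever sync_replicas is enabled):

# Asynchronous multi-GPU: the global batch is split across clones.
batch_size = 32 // 4       # train_config.batch_size = 32, num_clones = 4
assert batch_size == 8     # examples fed to each clone per step

# Synchronous SGD: num_clones is forced to 1, and the batch is instead
# divided by the number of replicas whose gradients are aggregated.
batch_size = 32 // 1
batch_size //= 4           # train_config.replicas_to_aggregate = 4
assert batch_size == 8     # examples per replica per aggregated step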
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Evaluation executable for detection models. This executable is used to evaluate DetectionModels. There are two ways of configuring the eval job. 1) A single pipeline_pb2.TrainEvalPipelineConfig file may be specified. In this mode, the --eval_training_data flag may be given to force the pipeline to evaluate on training data instead. Example usage: ./eval \ --logtostderr \ --checkpoint_dir=path/to/checkpoint_dir \ --eval_dir=path/to/eval_dir \ --pipeline_config_path=pipeline_config.pbtxt 2) Three configuration files may be provided: a model_pb2.DetectionModel configuration file to define what type of DetectionModel is being evaluated, an input_reader_pb2.InputReader file to specify what data the model is evaluating and an eval_pb2.EvalConfig file to configure evaluation parameters. Example usage: ./eval \ --logtostderr \ --checkpoint_dir=path/to/checkpoint_dir \ --eval_dir=path/to/eval_dir \ --eval_config_path=eval_config.pbtxt \ --model_config_path=model_config.pbtxt \ --input_config_path=eval_input_config.pbtxt """ import functools import os import tensorflow as tf from object_detection.builders import dataset_builder from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.legacy import evaluator from object_detection.utils import config_util from object_detection.utils import label_map_util tf.logging.set_verbosity(tf.logging.INFO) flags = tf.app.flags flags.DEFINE_boolean('eval_training_data', False, 'If training data should be evaluated for this job.') flags.DEFINE_string( 'checkpoint_dir', '', 'Directory containing checkpoints to evaluate, typically ' 'set to `train_dir` used in the training job.') flags.DEFINE_string('eval_dir', '', 'Directory to write eval summaries to.') flags.DEFINE_string( 'pipeline_config_path', '', 'Path to a pipeline_pb2.TrainEvalPipelineConfig config ' 'file. If provided, other configs are ignored') flags.DEFINE_string('eval_config_path', '', 'Path to an eval_pb2.EvalConfig config file.') flags.DEFINE_string('input_config_path', '', 'Path to an input_reader_pb2.InputReader config file.') flags.DEFINE_string('model_config_path', '', 'Path to a model_pb2.DetectionModel config file.') flags.DEFINE_boolean( 'run_once', False, 'Option to only run a single pass of ' 'evaluation. Overrides the `max_evals` parameter in the ' 'provided config.') FLAGS = flags.FLAGS @tf.contrib.framework.deprecated(None, 'Use object_detection/model_main.py.') def main(unused_argv): assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.' assert FLAGS.eval_dir, '`eval_dir` is missing.' 
tf.gfile.MakeDirs(FLAGS.eval_dir) if FLAGS.pipeline_config_path: configs = config_util.get_configs_from_pipeline_file( FLAGS.pipeline_config_path) tf.gfile.Copy( FLAGS.pipeline_config_path, os.path.join(FLAGS.eval_dir, 'pipeline.config'), overwrite=True) else: configs = config_util.get_configs_from_multiple_files( model_config_path=FLAGS.model_config_path, eval_config_path=FLAGS.eval_config_path, eval_input_config_path=FLAGS.input_config_path) for name, config in [('model.config', FLAGS.model_config_path), ('eval.config', FLAGS.eval_config_path), ('input.config', FLAGS.input_config_path)]: tf.gfile.Copy(config, os.path.join(FLAGS.eval_dir, name), overwrite=True) model_config = configs['model'] eval_config = configs['eval_config'] input_config = configs['eval_input_config'] if FLAGS.eval_training_data: input_config = configs['train_input_config'] model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=False) def get_next(config): return dataset_builder.make_initializable_iterator( dataset_builder.build(config)).get_next() create_input_dict_fn = functools.partial(get_next, input_config) categories = label_map_util.create_categories_from_labelmap( input_config.label_map_path) if FLAGS.run_once: eval_config.max_evals = 1 graph_rewriter_fn = None if 'graph_rewriter_config' in configs: graph_rewriter_fn = graph_rewriter_builder.build( configs['graph_rewriter_config'], is_training=False) evaluator.evaluate( create_input_dict_fn, model_fn, eval_config, categories, FLAGS.checkpoint_dir, FLAGS.eval_dir, graph_hook_fn=graph_rewriter_fn) if __name__ == '__main__': tf.app.run()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/legacy/eval.py
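The eval executable above leans entirely on the merged `configs` dictionary returned by config_util. A minimal sketch of that plumbing follows, using only the keys main() itself reads; the config path is a hypothetical placeholder, not a file from this repo.

# Sketch: load a merged pipeline config and pull out the same keys that
# eval.py's main() consumes. The path below is hypothetical.
from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file(
    'path/to/pipeline_config.pbtxt')  # hypothetical path
model_config = configs['model']
eval_config = configs['eval_config']
# Mirrors the --eval_training_data switch: swap in the train input reader.
eval_on_train = False
input_config = (configs['train_input_config'] if eval_on_train
                else configs['eval_input_config'])
print(type(model_config).__name__, input_config.label_map_path)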
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Detection model evaluator.

This file provides a generic evaluation method that can be used to evaluate a
DetectionModel.
"""

import logging
import tensorflow as tf

from object_detection import eval_util
from object_detection.core import prefetcher
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import object_detection_evaluation

# A dictionary of metric names to classes that implement the metric. The
# classes in the dictionary must implement the
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
    'pascal_voc_detection_metrics':
        object_detection_evaluation.PascalDetectionEvaluator,
    'weighted_pascal_voc_detection_metrics':
        object_detection_evaluation.WeightedPascalDetectionEvaluator,
    'pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.PascalInstanceSegmentationEvaluator,
    'weighted_pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
    'oid_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
    # DEPRECATED: please use oid_V2_detection_metrics instead
    'open_images_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
    'coco_detection_metrics':
        coco_evaluation.CocoDetectionEvaluator,
    'coco_mask_metrics':
        coco_evaluation.CocoMaskEvaluator,
    'oid_challenge_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    # DEPRECATED: please use oid_challenge_detection_metrics instead
    'oid_challenge_object_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
}

EVAL_DEFAULT_METRIC = 'pascal_voc_detection_metrics'


def _extract_predictions_and_losses(model,
                                    create_input_dict_fn,
                                    ignore_groundtruth=False):
  """Constructs tensorflow detection graph and returns output tensors.

  Args:
    model: model to perform predictions with.
    create_input_dict_fn: function to create input tensor dictionaries.
    ignore_groundtruth: whether groundtruth should be ignored.

  Returns:
    prediction_groundtruth_dict: A dictionary with postprocessed tensors
      (keyed by standard_fields.DetectionResultFields) and optional
      groundtruth tensors (keyed by standard_fields.InputDataFields).
    losses_dict: A dictionary containing detection losses. This is empty when
      ignore_groundtruth is true.
  """
  input_dict = create_input_dict_fn()
  prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
  input_dict = prefetch_queue.dequeue()
  original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)
  preprocessed_image, true_image_shapes = model.preprocess(
      tf.to_float(original_image))
  prediction_dict = model.predict(preprocessed_image, true_image_shapes)
  detections = model.postprocess(prediction_dict, true_image_shapes)

  groundtruth = None
  losses_dict = {}
  if not ignore_groundtruth:
    groundtruth = {
        fields.InputDataFields.groundtruth_boxes:
            input_dict[fields.InputDataFields.groundtruth_boxes],
        fields.InputDataFields.groundtruth_classes:
            input_dict[fields.InputDataFields.groundtruth_classes],
        fields.InputDataFields.groundtruth_area:
            input_dict[fields.InputDataFields.groundtruth_area],
        fields.InputDataFields.groundtruth_is_crowd:
            input_dict[fields.InputDataFields.groundtruth_is_crowd],
        fields.InputDataFields.groundtruth_difficult:
            input_dict[fields.InputDataFields.groundtruth_difficult]
    }
    if fields.InputDataFields.groundtruth_group_of in input_dict:
      groundtruth[fields.InputDataFields.groundtruth_group_of] = (
          input_dict[fields.InputDataFields.groundtruth_group_of])
    groundtruth_masks_list = None
    if fields.DetectionResultFields.detection_masks in detections:
      groundtruth[fields.InputDataFields.groundtruth_instance_masks] = (
          input_dict[fields.InputDataFields.groundtruth_instance_masks])
      groundtruth_masks_list = [
          input_dict[fields.InputDataFields.groundtruth_instance_masks]]
    groundtruth_keypoints_list = None
    if fields.DetectionResultFields.detection_keypoints in detections:
      groundtruth[fields.InputDataFields.groundtruth_keypoints] = (
          input_dict[fields.InputDataFields.groundtruth_keypoints])
      groundtruth_keypoints_list = [
          input_dict[fields.InputDataFields.groundtruth_keypoints]]
    label_id_offset = 1
    model.provide_groundtruth(
        [input_dict[fields.InputDataFields.groundtruth_boxes]],
        [tf.one_hot(input_dict[fields.InputDataFields.groundtruth_classes]
                    - label_id_offset, depth=model.num_classes)],
        groundtruth_masks_list, groundtruth_keypoints_list)
    losses_dict.update(model.loss(prediction_dict, true_image_shapes))

  result_dict = eval_util.result_dict_for_single_example(
      original_image,
      input_dict[fields.InputDataFields.source_id],
      detections,
      groundtruth,
      class_agnostic=(
          fields.DetectionResultFields.detection_classes not in detections),
      scale_to_absolute=True)
  return result_dict, losses_dict


def get_evaluators(eval_config, categories):
  """Returns a list of evaluators according to eval_config, valid for categories.

  Args:
    eval_config: evaluation configurations.
    categories: a list of categories to evaluate.

  Returns:
    A list of instances of DetectionEvaluator.

  Raises:
    ValueError: if metric is not in the metric class dictionary.
  """
  eval_metric_fn_keys = eval_config.metrics_set
  if not eval_metric_fn_keys:
    eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
  evaluators_list = []
  for eval_metric_fn_key in eval_metric_fn_keys:
    if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
      raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
    if eval_metric_fn_key == 'oid_challenge_object_detection_metrics':
      logging.warning(
          'oid_challenge_object_detection_metrics is deprecated; '
          'use oid_challenge_detection_metrics instead')
    if eval_metric_fn_key == 'open_images_V2_detection_metrics':
      logging.warning(
          'open_images_V2_detection_metrics is deprecated; '
          'use oid_V2_detection_metrics instead')
    evaluators_list.append(
        EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))
  return evaluators_list


def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,
             checkpoint_dir, eval_dir, graph_hook_fn=None,
             evaluator_list=None):
  """Evaluation function for detection models.

  Args:
    create_input_dict_fn: a function to create a tensor input dictionary.
    create_model_fn: a function that creates a DetectionModel.
    eval_config: an eval_pb2.EvalConfig protobuf.
    categories: a list of category dictionaries. Each dict in the list should
      have an integer 'id' field and string 'name' field.
    checkpoint_dir: directory to load the checkpoints to evaluate from.
    eval_dir: directory to write evaluation metrics summary to.
    graph_hook_fn: Optional function that is called after the eval graph is
      completely built. This is helpful to perform additional changes to the
      graph such as optimizing batchnorm. The function should modify the
      default graph.
    evaluator_list: Optional list of instances of DetectionEvaluator. If not
      given, this list of metrics is created according to the eval_config.

  Returns:
    metrics: A dictionary containing metric names and values from the latest
      run.
  """

  model = create_model_fn()

  if eval_config.ignore_groundtruth and not eval_config.export_path:
    logging.fatal('If ignore_groundtruth=True then an export_path is '
                  'required. Aborting!!!')

  tensor_dict, losses_dict = _extract_predictions_and_losses(
      model=model,
      create_input_dict_fn=create_input_dict_fn,
      ignore_groundtruth=eval_config.ignore_groundtruth)

  def _process_batch(tensor_dict, sess, batch_index, counters,
                     losses_dict=None):
    """Evaluates tensors in tensor_dict, losses_dict and visualizes examples.

    This function calls sess.run on tensor_dict, evaluating the original_image
    tensor only on the first K examples and visualizing detections overlaid
    on this original_image.

    Args:
      tensor_dict: a dictionary of tensors
      sess: tensorflow session
      batch_index: the index of the batch amongst all batches in the run.
      counters: a dictionary holding 'success' and 'skipped' fields which can
        be updated to keep track of number of successful and failed runs,
        respectively. If these fields are not updated, then the
        success/skipped counter values shown at the end of evaluation will be
        incorrect.
      losses_dict: Optional dictionary of scalar loss tensors.

    Returns:
      result_dict: a dictionary of numpy arrays
      result_losses_dict: a dictionary of scalar losses. This is empty if
        input losses_dict is None.
    """
    try:
      if not losses_dict:
        losses_dict = {}
      result_dict, result_losses_dict = sess.run([tensor_dict, losses_dict])
      counters['success'] += 1
    except tf.errors.InvalidArgumentError:
      logging.info('Skipping image')
      counters['skipped'] += 1
      return {}, {}
    global_step = tf.train.global_step(sess, tf.train.get_global_step())
    if batch_index < eval_config.num_visualizations:
      tag = 'image-{}'.format(batch_index)
      eval_util.visualize_detection_results(
          result_dict,
          tag,
          global_step,
          categories=categories,
          summary_dir=eval_dir,
          export_dir=eval_config.visualization_export_dir,
          show_groundtruth=eval_config.visualize_groundtruth_boxes,
          groundtruth_box_visualization_color=eval_config.
          groundtruth_box_visualization_color,
          min_score_thresh=eval_config.min_score_threshold,
          max_num_predictions=eval_config.max_num_boxes_to_visualize,
          skip_scores=eval_config.skip_scores,
          skip_labels=eval_config.skip_labels,
          keep_image_id_for_visualization_export=eval_config.
          keep_image_id_for_visualization_export)
    return result_dict, result_losses_dict

  if graph_hook_fn:
    graph_hook_fn()

  variables_to_restore = tf.global_variables()
  global_step = tf.train.get_or_create_global_step()
  variables_to_restore.append(global_step)

  if eval_config.use_moving_averages:
    variable_averages = tf.train.ExponentialMovingAverage(0.0)
    variables_to_restore = variable_averages.variables_to_restore()
  saver = tf.train.Saver(variables_to_restore)

  def _restore_latest_checkpoint(sess):
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
    saver.restore(sess, latest_checkpoint)

  if not evaluator_list:
    evaluator_list = get_evaluators(eval_config, categories)

  metrics = eval_util.repeated_checkpoint_run(
      tensor_dict=tensor_dict,
      summary_dir=eval_dir,
      evaluators=evaluator_list,
      batch_processor=_process_batch,
      checkpoint_dirs=[checkpoint_dir],
      variables_to_restore=None,
      restore_fn=_restore_latest_checkpoint,
      num_batches=eval_config.num_examples,
      eval_interval_secs=eval_config.eval_interval_secs,
      max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else
                                 eval_config.max_evals
                                 if eval_config.max_evals else None),
      master=eval_config.eval_master,
      save_graph=eval_config.save_graph,
      save_graph_dir=(eval_dir if eval_config.save_graph else ''),
      losses_dict=losses_dict,
      eval_export_path=eval_config.export_path)

  return metrics
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/legacy/evaluator.py
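To make the metric-name resolution in get_evaluators concrete, here is a small sketch (not part of the source) of calling it directly: an EvalConfig with an explicit metrics_set yields one evaluator instance per name, while an empty metrics_set falls back to EVAL_DEFAULT_METRIC. The category list is made up for illustration.

# Sketch: resolve metric names into evaluator instances via get_evaluators.
from object_detection.legacy import evaluator
from object_detection.protos import eval_pb2

categories = [{'id': 1, 'name': 'dog'}, {'id': 2, 'name': 'cat'}]  # illustrative
eval_config = eval_pb2.EvalConfig()
eval_config.metrics_set.append('coco_detection_metrics')

evaluators = evaluator.get_evaluators(eval_config, categories)
print([type(e).__name__ for e in evaluators])  # ['CocoDetectionEvaluator']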
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.convolutional_box_predictor.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import convolutional_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class ConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_boxes_for_five_aspect_ratios_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_get_boxes_for_one_aspect_ratio_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 
64, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_predictions_with_feature_maps_of_dynamic_shape( self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/Conv2d_0_1x1_32/biases', 'BoxPredictor/Conv2d_0_1x1_32/weights', 'BoxPredictor/BoxEncodingPredictor/biases', 'BoxPredictor/BoxEncodingPredictor/weights', 'BoxPredictor/ClassPredictor/biases', 'BoxPredictor/ClassPredictor/weights']) self.assertEqual(expected_variable_set, actual_variable_set) def test_use_depthwise_convolution(self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4, use_dropout=True, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = 
tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/Conv2d_0_1x1_32/biases', 'BoxPredictor/Conv2d_0_1x1_32/weights', 'BoxPredictor/BoxEncodingPredictor_depthwise/biases', 'BoxPredictor/BoxEncodingPredictor_depthwise/depthwise_weights', 'BoxPredictor/BoxEncodingPredictor/biases', 'BoxPredictor/BoxEncodingPredictor/weights', 'BoxPredictor/ClassPredictor_depthwise/biases', 'BoxPredictor/ClassPredictor_depthwise/depthwise_weights', 'BoxPredictor/ClassPredictor/biases', 'BoxPredictor/ClassPredictor/weights']) self.assertEqual(expected_variable_set, actual_variable_set) def test_no_dangling_outputs(self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4, use_dropout=True, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) bad_dangling_ops = [] types_safe_to_dangle = set(['Assign', 'Mul', 'Const']) for op in tf.get_default_graph().get_operations(): if (not op.outputs) or (not op.outputs[0].consumers()): if 'BoxPredictor' in op.name: if op.type not in types_safe_to_dangle: bad_dangling_ops.append(op) self.assertEqual(bad_dangling_ops, []) class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } batch_norm { train: true, } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def _build_conv_arg_scope_no_batch_norm(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_boxes_for_five_aspect_ratios_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, 
num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=True, num_classes=2, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, class_prediction_bias_init=-4.6, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') class_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (tf.nn.sigmoid(class_predictions),) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) class_predictions = self.execute(graph_fn, [image_features]) self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_multi_class_predictions_from_two_feature_maps( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) 
image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2]) self.assertAllEqual(box_encodings.shape, [4, 640, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 640, num_classes_without_background+1]) def test_get_multi_class_predictions_from_feature_maps_of_different_depth( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2, image_features3): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2, image_features3], num_predictions_per_location=[5, 5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2, image_features3]) self.assertAllEqual(box_encodings.shape, [4, 960, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 960, num_classes_without_background+1]) def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Box prediction head 
('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_with_depthwise( self): 
num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/depthwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/pointwise_weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_no_batchnorm_params_when_batchnorm_is_not_configured(self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_conv_arg_scope_no_batch_norm(), depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 
5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_share_weights_share_tower_separate_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, share_prediction_tower=True)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Shared prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 
'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_share_weights_share_tower_without_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, share_prediction_tower=True, apply_batch_norm=False)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Shared prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'PredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_get_predictions_with_feature_maps_of_dynamic_shape( self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat(box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) 
self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/convolutional_box_predictor_test.py
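Every shape assertion in the test file above follows from the same anchor arithmetic: a feature map of height H and width W with N predictions per spatial location contributes H * W * N anchors. A quick standalone check of the constants the tests use:

# Sketch: the anchor-count arithmetic behind the asserted shapes.
def num_anchors(height, width, preds_per_location):
  """Number of anchor predictions emitted for one feature map."""
  return height * width * preds_per_location

assert num_anchors(8, 8, 5) == 320     # five aspect ratios per location
assert num_anchors(8, 8, 1) == 64      # one aspect ratio per location
assert num_anchors(32, 32, 5) == 5120  # dynamic-shape tests at resolution 32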
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Mask R-CNN Box Predictor."""
import tensorflow as tf

from object_detection.core import box_predictor

slim = tf.contrib.slim

BOX_ENCODINGS = box_predictor.BOX_ENCODINGS
CLASS_PREDICTIONS_WITH_BACKGROUND = (
    box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND)
MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS


class MaskRCNNBoxPredictor(box_predictor.BoxPredictor):
  """Mask R-CNN Box Predictor.

  See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017).
  Mask R-CNN. arXiv preprint arXiv:1703.06870.

  This is used for the second stage of the Mask R-CNN detector where proposals
  cropped from an image are arranged along the batch dimension of the input
  image_features tensor. Notice that locations are *not* shared across
  classes, thus for each anchor, a separate box prediction is made for each
  class.

  In addition to predicting boxes and classes, this class can optionally
  predict masks and/or keypoints inside detection boxes.
  """

  def __init__(self,
               is_training,
               num_classes,
               box_prediction_head,
               class_prediction_head,
               third_stage_heads):
    """Constructor.

    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_classes: number of classes. Note that num_classes *does not* include
        the background category, so if groundtruth labels take values in
        {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned
        classification targets can range from {0,... K}).
      box_prediction_head: The head that predicts the boxes in second stage.
      class_prediction_head: The head that predicts the classes in second
        stage.
      third_stage_heads: A dictionary mapping head names to Mask R-CNN head
        classes.
    """
    super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes)
    self._box_prediction_head = box_prediction_head
    self._class_prediction_head = class_prediction_head
    self._third_stage_heads = third_stage_heads

  @property
  def num_classes(self):
    return self._num_classes

  def get_second_stage_prediction_heads(self):
    return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND

  def get_third_stage_prediction_heads(self):
    return sorted(self._third_stage_heads.keys())

  def _predict(self,
               image_features,
               num_predictions_per_location,
               prediction_stage=2):
    """Optionally computes encoded object locations, confidences, and masks.

    Predicts the heads belonging to the given prediction stage.

    Args:
      image_features: A list of float tensors of shape
        [batch_size, height_i, width_i, channels_i] containing roi pooled
        features for each image. The length of the list should be 1 otherwise
        a ValueError will be raised.
      num_predictions_per_location: A list of integers representing the number
        of box predictions to be made per spatial location for each feature
        map. Currently, this must be set to [1], or an error will be raised.
      prediction_stage: Prediction stage. Acceptable values are 2 and 3.

    Returns:
      A dictionary containing the predicted tensors that are listed in
      self._prediction_heads. A subset of the following keys will exist in the
      dictionary:
        BOX_ENCODINGS: A float tensor of shape
          [batch_size, 1, num_classes, code_size] representing the location of
          the objects.
        CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape
          [batch_size, 1, num_classes + 1] representing the class predictions
          for the proposals.
        MASK_PREDICTIONS: A float tensor of shape
          [batch_size, 1, num_classes, image_height, image_width]

    Raises:
      ValueError: If num_predictions_per_location is not 1 or if
        len(image_features) is not 1.
      ValueError: if prediction_stage is not 2 or 3.
    """
    if (len(num_predictions_per_location) != 1 or
        num_predictions_per_location[0] != 1):
      raise ValueError('Currently MaskRCNNBoxPredictor only supports '
                       'predicting a single box per class per location.')
    if len(image_features) != 1:
      raise ValueError('length of `image_features` must be 1. Found {}'.format(
          len(image_features)))
    image_feature = image_features[0]
    predictions_dict = {}

    if prediction_stage == 2:
      predictions_dict[BOX_ENCODINGS] = self._box_prediction_head.predict(
          features=image_feature,
          num_predictions_per_location=num_predictions_per_location[0])
      predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = (
          self._class_prediction_head.predict(
              features=image_feature,
              num_predictions_per_location=num_predictions_per_location[0]))
    elif prediction_stage == 3:
      for prediction_head in self.get_third_stage_prediction_heads():
        head_object = self._third_stage_heads[prediction_head]
        predictions_dict[prediction_head] = head_object.predict(
            features=image_feature,
            num_predictions_per_location=num_predictions_per_location[0])
    else:
      raise ValueError('prediction_stage should be either 2 or 3.')

    return predictions_dict
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/mask_rcnn_box_predictor.py
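The stage dispatch in _predict is just a routing decision over head objects that share a predict(features, num_predictions_per_location) interface. The sketch below illustrates that pattern with stub heads and made-up shapes; it is not the real head implementation, only a stand-in for the interface the predictor assumes.

# Illustrative sketch of the stage-2 vs. stage-3 dispatch with stub heads.
import numpy as np

class _StubHead(object):
  """Stand-in for a prediction head; only mimics the predict() interface."""

  def __init__(self, shape):
    self._shape = shape

  def predict(self, features, num_predictions_per_location):
    del features, num_predictions_per_location  # unused by the stub
    return np.zeros(self._shape, dtype=np.float32)

box_head = _StubHead((8, 1, 3, 4))        # [batch, 1, num_classes, code_size]
class_head = _StubHead((8, 1, 4))         # [batch, 1, num_classes + 1]
third_stage_heads = {'mask_predictions': _StubHead((8, 1, 3, 33, 33))}

features = np.zeros((8, 7, 7, 256), dtype=np.float32)  # roi-pooled features
# Stage 2: box refinement and classification heads.
stage2 = {
    'box_encodings': box_head.predict(features, 1),
    'class_predictions_with_background': class_head.predict(features, 1),
}
# Stage 3: every registered third-stage head, in sorted key order.
stage3 = {name: head.predict(features, 1)
          for name, head in sorted(third_stage_heads.items())}
print(sorted(stage2), sorted(stage3))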
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""RFCN Box Predictor."""
import tensorflow as tf

from object_detection.core import box_predictor
from object_detection.utils import ops

slim = tf.contrib.slim

BOX_ENCODINGS = box_predictor.BOX_ENCODINGS
CLASS_PREDICTIONS_WITH_BACKGROUND = (
    box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND)
MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS


class RfcnBoxPredictor(box_predictor.BoxPredictor):
  """RFCN Box Predictor.

  Applies a position sensitive ROI pooling on position sensitive feature maps
  to predict classes and refined locations. See
  https://arxiv.org/abs/1605.06409 for details.

  This is used for the second stage of the RFCN meta architecture. Notice that
  locations are *not* shared across classes, thus for each anchor, a separate
  prediction is made for each class.
  """

  def __init__(self,
               is_training,
               num_classes,
               conv_hyperparams_fn,
               num_spatial_bins,
               depth,
               crop_size,
               box_code_size):
    """Constructor.

    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_classes: number of classes. Note that num_classes *does not* include
        the background category, so if groundtruth labels take values in
        {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned
        classification targets can range from {0,... K}).
      conv_hyperparams_fn: A function to construct tf-slim arg_scope with
        hyperparameters for convolutional layers.
      num_spatial_bins: A list of two integers `[spatial_bins_y,
        spatial_bins_x]`.
      depth: Target depth to reduce the input feature maps to.
      crop_size: A list of two integers `[crop_height, crop_width]`.
      box_code_size: Size of encoding for each box.
    """
    super(RfcnBoxPredictor, self).__init__(is_training, num_classes)
    self._conv_hyperparams_fn = conv_hyperparams_fn
    self._num_spatial_bins = num_spatial_bins
    self._depth = depth
    self._crop_size = crop_size
    self._box_code_size = box_code_size

  @property
  def num_classes(self):
    return self._num_classes

  def _predict(self, image_features, num_predictions_per_location,
               proposal_boxes):
    """Computes encoded object locations and corresponding confidences.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.
      num_predictions_per_location: A list of integers representing the number
        of box predictions to be made per spatial location for each feature
        map. Currently, this must be set to [1], or an error will be raised.
      proposal_boxes: A float tensor of shape [batch_size, num_proposals,
        box_code_size].

    Returns:
      box_encodings: A list of float tensors of shape
        [batch_size, num_anchors_i, q, code_size] representing the location of
        the objects, where q is 1 or the number of classes. Each entry in the
        list corresponds to a feature map in the input `image_features` list.
      class_predictions_with_background: A list of float tensors of shape
        [batch_size, num_anchors_i, num_classes + 1] representing the class
        predictions for the proposals. Each entry in the list corresponds to a
        feature map in the input `image_features` list.

    Raises:
      ValueError: if num_predictions_per_location is not 1 or if
        len(image_features) is not 1.
    """
    if (len(num_predictions_per_location) != 1 or
        num_predictions_per_location[0] != 1):
      raise ValueError('Currently RfcnBoxPredictor only supports '
                       'predicting a single box per class per location.')
    if len(image_features) != 1:
      raise ValueError('length of `image_features` must be 1. Found {}'.format(
          len(image_features)))
    image_feature = image_features[0]
    num_predictions_per_location = num_predictions_per_location[0]
    batch_size = tf.shape(proposal_boxes)[0]
    num_boxes = tf.shape(proposal_boxes)[1]
    net = image_feature
    with slim.arg_scope(self._conv_hyperparams_fn()):
      net = slim.conv2d(net, self._depth, [1, 1], scope='reduce_depth')
      # Location predictions.
      location_feature_map_depth = (self._num_spatial_bins[0] *
                                    self._num_spatial_bins[1] *
                                    self.num_classes *
                                    self._box_code_size)
      location_feature_map = slim.conv2d(net, location_feature_map_depth,
                                         [1, 1], activation_fn=None,
                                         scope='refined_locations')
      box_encodings = ops.batch_position_sensitive_crop_regions(
          location_feature_map,
          boxes=proposal_boxes,
          crop_size=self._crop_size,
          num_spatial_bins=self._num_spatial_bins,
          global_pool=True)
      box_encodings = tf.squeeze(box_encodings, axis=[2, 3])
      box_encodings = tf.reshape(box_encodings,
                                 [batch_size * num_boxes, 1, self.num_classes,
                                  self._box_code_size])

      # Class predictions.
      total_classes = self.num_classes + 1  # Account for background class.
      class_feature_map_depth = (self._num_spatial_bins[0] *
                                 self._num_spatial_bins[1] *
                                 total_classes)
      class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1],
                                      activation_fn=None,
                                      scope='class_predictions')
      class_predictions_with_background = (
          ops.batch_position_sensitive_crop_regions(
              class_feature_map,
              boxes=proposal_boxes,
              crop_size=self._crop_size,
              num_spatial_bins=self._num_spatial_bins,
              global_pool=True))
      class_predictions_with_background = tf.squeeze(
          class_predictions_with_background, axis=[2, 3])
      class_predictions_with_background = tf.reshape(
          class_predictions_with_background,
          [batch_size * num_boxes, 1, total_classes])

    return {BOX_ENCODINGS: [box_encodings],
            CLASS_PREDICTIONS_WITH_BACKGROUND:
                [class_predictions_with_background]}
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/rfcn_box_predictor.py
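The 1x1 conv depths in RfcnBoxPredictor follow directly from the position-sensitive pooling layout above. A worked example with commonly used values (3x3 spatial bins, 90 classes, 4 box coordinates; illustrative numbers, not taken from any specific config in this repo):

# Sketch: the feature-map depth arithmetic used by the RFCN predictor.
num_spatial_bins = [3, 3]
num_classes = 90
box_code_size = 4

location_depth = (num_spatial_bins[0] * num_spatial_bins[1] *
                  num_classes * box_code_size)
class_depth = (num_spatial_bins[0] * num_spatial_bins[1] *
               (num_classes + 1))  # +1 accounts for the background class

print(location_depth)  # 3 * 3 * 90 * 4 = 3240
print(class_depth)     # 3 * 3 * 91 = 819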
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.convolutional_keras_box_predictor.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import convolutional_keras_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class ConvolutionalKerasBoxPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_get_boxes_for_five_aspect_ratios_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_get_boxes_for_one_aspect_ratio_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[1], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) 
self.assertAllEqual(box_encodings.shape, [4, 64, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_predictions_with_feature_maps_of_dynamic_shape( self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_keras_box_predictor( is_training=False, num_classes=0, conv_hyperparams=self._build_conv_hyperparams(), freeze_batchnorm=False, inplace_batchnorm_update=False, num_predictions_per_location_list=[5], min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4 )) box_predictions = conv_box_predictor([image_features]) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/bias', 'BoxPredictor/SharedConvolutions_0/Conv2d_0_1x1_32/kernel', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/bias', 'BoxPredictor/ConvolutionalBoxHead_0/BoxEncodingPredictor/kernel', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/bias', 'BoxPredictor/ConvolutionalClassHead_0/ClassPredictor/kernel']) self.assertEqual(expected_variable_set, actual_variable_set) self.assertEqual(conv_box_predictor._sorted_head_names, ['box_encodings', 'class_predictions_with_background']) # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10 if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/convolutional_keras_box_predictor_test.py
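A minimal usage sketch (not part of the repository files above) showing how the Keras convolutional box predictor exercised by these tests can be built and run outside the test harness; it assumes the object_detection package and TF 1.x graph mode are available and reuses the same builder arguments and hyperparams proto as the tests.

import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import convolutional_keras_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2

conv_hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: RELU_6
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", conv_hyperparams)

predictor = box_predictor_builder.build_convolutional_keras_box_predictor(
    is_training=False, num_classes=0,
    conv_hyperparams=hyperparams_builder.KerasLayerHyperparams(
        conv_hyperparams),
    freeze_batchnorm=False, inplace_batchnorm_update=False,
    num_predictions_per_location_list=[5], min_depth=0, max_depth=32,
    num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8,
    kernel_size=1, box_code_size=4)

# One 8x8 feature map with 5 boxes per location -> 8 * 8 * 5 = 320 anchors.
features = tf.constant(np.random.rand(4, 8, 8, 64), dtype=tf.float32)
predictions = predictor([features])
box_encodings = tf.concat(predictions[box_predictor.BOX_ENCODINGS], axis=1)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(box_encodings).shape)  # (4, 320, 1, 4)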
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convolutional Box Predictors with and without weight sharing.""" import collections import tensorflow as tf from object_detection.core import box_predictor from object_detection.utils import static_shape keras = tf.keras.layers BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class _NoopVariableScope(object): """A dummy class that does not push any scope.""" def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False class ConvolutionalBoxPredictor(box_predictor.KerasBoxPredictor): """Convolutional Keras Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and predict in parallel branches box_encodings and class_predictions_with_background. Currently this box predictor assumes that predictions are "shared" across classes --- that is each anchor makes box predictions which do not depend on class. """ def __init__(self, is_training, num_classes, box_prediction_heads, class_prediction_heads, other_heads, conv_hyperparams, num_layers_before_predictor, min_depth, max_depth, freeze_batchnorm, inplace_batchnorm_update, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_heads: A list of heads that predict the boxes. class_prediction_heads: A list of heads that predict the classes. other_heads: A dictionary mapping head names to lists of convolutional heads. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. num_layers_before_predictor: Number of the additional conv layers before the predictor. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. freeze_batchnorm: Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. inplace_batchnorm_update: Whether to update batch norm moving average values inplace. When this is false train op must add a control dependency on tf.graphkeys.UPDATE_OPS collection in order to update batch norm statistics. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. 
    Raises:
      ValueError: if min_depth > max_depth.
    """
    super(ConvolutionalBoxPredictor, self).__init__(
        is_training, num_classes, freeze_batchnorm=freeze_batchnorm,
        inplace_batchnorm_update=inplace_batchnorm_update,
        name=name)
    if min_depth > max_depth:
      raise ValueError('min_depth should be less than or equal to max_depth')
    if len(box_prediction_heads) != len(class_prediction_heads):
      raise ValueError('All lists of heads must be the same length.')
    for other_head_list in other_heads.values():
      if len(box_prediction_heads) != len(other_head_list):
        raise ValueError('All lists of heads must be the same length.')

    self._prediction_heads = {
        BOX_ENCODINGS: box_prediction_heads,
        CLASS_PREDICTIONS_WITH_BACKGROUND: class_prediction_heads,
    }

    if other_heads:
      self._prediction_heads.update(other_heads)

    # We generate a consistent ordering for the prediction head names,
    # so that all workers build the model in the exact same order.
    self._sorted_head_names = sorted(self._prediction_heads.keys())

    self._conv_hyperparams = conv_hyperparams
    self._min_depth = min_depth
    self._max_depth = max_depth
    self._num_layers_before_predictor = num_layers_before_predictor

    self._shared_nets = []

  def build(self, input_shapes):
    """Creates the variables of the layer."""
    if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]):
      raise ValueError('This box predictor was constructed with %d heads, '
                       'but there are %d inputs.' %
                       (len(self._prediction_heads[BOX_ENCODINGS]),
                        len(input_shapes)))
    for stack_index, input_shape in enumerate(input_shapes):
      net = []

      # Add additional conv layers before the class predictor.
      features_depth = static_shape.get_depth(input_shape)
      depth = max(min(features_depth, self._max_depth), self._min_depth)
      tf.logging.info(
          'depth of additional conv before box predictor: {}'.format(depth))

      if depth > 0 and self._num_layers_before_predictor > 0:
        for i in range(self._num_layers_before_predictor):
          net.append(keras.Conv2D(depth, [1, 1],
                                  name='SharedConvolutions_%d/Conv2d_%d_1x1_%d'
                                  % (stack_index, i, depth),
                                  padding='SAME',
                                  **self._conv_hyperparams.params()))
          net.append(self._conv_hyperparams.build_batch_norm(
              training=(self._is_training and not self._freeze_batchnorm),
              name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm'
              % (stack_index, i, depth)))
          net.append(self._conv_hyperparams.build_activation_layer(
              name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation'
              % (stack_index, i, depth),
          ))
      # Until certain bugs are fixed in checkpointable lists,
      # this net must be appended only once it's been filled with layers.
      self._shared_nets.append(net)
    self.built = True

  def _predict(self, image_features):
    """Computes encoded object locations and corresponding confidences.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.

    Returns:
      box_encodings: A list of float tensors of shape
        [batch_size, num_anchors_i, q, code_size] representing the location of
        the objects, where q is 1 or the number of classes. Each entry in the
        list corresponds to a feature map in the input `image_features` list.
      class_predictions_with_background: A list of float tensors of shape
        [batch_size, num_anchors_i, num_classes + 1] representing the class
        predictions for the proposals. Each entry in the list corresponds to a
        feature map in the input `image_features` list.
    """
    predictions = collections.defaultdict(list)
    for (index, net) in enumerate(image_features):
      # Apply shared conv layers before the head predictors.
for layer in self._shared_nets[index]: net = layer(net) for head_name in self._sorted_head_names: head_obj = self._prediction_heads[head_name][index] prediction = head_obj(net) predictions[head_name].append(prediction) return predictions
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/convolutional_keras_box_predictor.py
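The Keras predictor above returns a dictionary mapping each head name to one tensor per input feature map; a small self-contained sketch (with hypothetical per-level anchor counts) of how downstream code typically concatenates those lists along the anchor axis:

import tensorflow as tf

# Stand-ins shaped like the predictor's return value: head name -> list of
# per-feature-map tensors, here for two levels with 320 and 80 anchors.
predictions = {
    'box_encodings': [tf.zeros([4, 320, 1, 4]), tf.zeros([4, 80, 1, 4])],
    'class_predictions_with_background': [tf.zeros([4, 320, 1]),
                                          tf.zeros([4, 80, 1])],
}
# Concatenating along axis 1 merges the per-level anchors into one list.
all_boxes = tf.concat(predictions['box_encodings'], axis=1)     # [4, 400, 1, 4]
all_classes = tf.concat(
    predictions['class_predictions_with_background'], axis=1)   # [4, 400, 1]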
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.rfcn_box_predictor.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors import rfcn_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class RfcnBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_correct_box_encoding_and_class_prediction_shapes(self): def graph_fn(image_features, proposal_boxes): rfcn_box_predictor = box_predictor.RfcnBoxPredictor( is_training=False, num_classes=2, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), num_spatial_bins=[3, 3], depth=4, crop_size=[12, 12], box_code_size=4 ) box_predictions = rfcn_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', proposal_boxes=proposal_boxes) box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) proposal_boxes = np.random.rand(4, 2, 4).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features, proposal_boxes]) self.assertAllEqual(box_encodings.shape, [8, 1, 2, 4]) self.assertAllEqual(class_predictions_with_background.shape, [8, 1, 3]) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/rfcn_box_predictor_test.py
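The shape assertions in the R-FCN test above follow from folding the proposals into the batch dimension; a tiny arithmetic sketch with the values taken from the test:

batch_size, num_proposals, num_classes, box_code_size = 4, 2, 2, 4
folded_batch = batch_size * num_proposals            # 8: one row per proposal
box_shape = (folded_batch, 1, num_classes, box_code_size)  # (8, 1, 2, 4)
class_shape = (folded_batch, 1, num_classes + 1)           # (8, 1, 3): +1 for background
print(box_shape, class_shape)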
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.mask_rcnn_box_predictor.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import mask_rcnn_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class MaskRCNNBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_get_boxes_with_five_classes(self): def graph_fn(image_features): mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, ) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=2) return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4]) self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) def test_get_boxes_with_five_classes_share_box_across_classes(self): def graph_fn(image_features): mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, share_box_across_classes=True ) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=2) return (box_predictions[box_predictor.BOX_ENCODINGS], box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND]) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6]) def test_value_error_on_predict_instance_masks_with_no_conv_hyperparms(self): with self.assertRaises(ValueError): box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, 
fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, predict_instance_masks=True) def test_get_instance_masks(self): def graph_fn(image_features): mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4, conv_hyperparams_fn=self._build_arg_scope_with_hyperparams( op_type=hyperparams_pb2.Hyperparams.CONV), predict_instance_masks=True) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=3) return (box_predictions[box_predictor.MASK_PREDICTIONS],) image_features = np.random.rand(2, 7, 7, 3).astype(np.float32) mask_predictions = self.execute(graph_fn, [image_features]) self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14]) def test_do_not_return_instance_masks_without_request(self): image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32) mask_box_predictor = box_predictor_builder.build_mask_rcnn_box_predictor( is_training=False, num_classes=5, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=False, dropout_keep_prob=0.5, box_code_size=4) box_predictions = mask_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor', prediction_stage=2) self.assertEqual(len(box_predictions), 2) self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions) self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND in box_predictions) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/mask_rcnn_box_predictor_test.py
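A sketch (not part of the repository) of the prediction stages the tests above exercise, assuming the object_detection package is importable: stage 2 yields box encodings and class scores, while stage 3 runs the mask branch and requires predict_instance_masks=True plus conv hyperparameters, exactly as in test_get_instance_masks.

import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.predictors import mask_rcnn_box_predictor as box_predictor
from object_detection.protos import hyperparams_pb2

def _hyperparams_fn(op_type):
  # Same hyperparams text proto the test class builds.
  hyperparams = hyperparams_pb2.Hyperparams()
  text_format.Merge("""
    activation: NONE
    regularizer { l2_regularizer { } }
    initializer { truncated_normal_initializer { } }
  """, hyperparams)
  hyperparams.op = op_type
  return hyperparams_builder.build(hyperparams, is_training=False)

predictor = box_predictor_builder.build_mask_rcnn_box_predictor(
    is_training=False, num_classes=5,
    fc_hyperparams_fn=_hyperparams_fn(hyperparams_pb2.Hyperparams.FC),
    use_dropout=False, dropout_keep_prob=0.5, box_code_size=4,
    conv_hyperparams_fn=_hyperparams_fn(hyperparams_pb2.Hyperparams.CONV),
    predict_instance_masks=True)
features = tf.constant(np.random.rand(2, 7, 7, 3), dtype=tf.float32)
# Stage 3 computes the mask branch; stage 2 would return boxes and classes.
outputs = predictor.predict([features], num_predictions_per_location=[1],
                            scope='BoxPredictor', prediction_stage=3)
# outputs[box_predictor.MASK_PREDICTIONS] has shape [2, 1, 5, 14, 14].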
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convolutional Box Predictors with and without weight sharing.""" import functools import tensorflow as tf from object_detection.core import box_predictor from object_detection.utils import static_shape slim = tf.contrib.slim BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class _NoopVariableScope(object): """A dummy class that does not push any scope.""" def __enter__(self): return None def __exit__(self, exc_type, exc_value, traceback): return False class ConvolutionalBoxPredictor(box_predictor.BoxPredictor): """Convolutional Box Predictor. Optionally add an intermediate 1x1 convolutional layer after features and predict in parallel branches box_encodings and class_predictions_with_background. Currently this box predictor assumes that predictions are "shared" across classes --- that is each anchor makes box predictions which do not depend on class. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, num_layers_before_predictor, min_depth, max_depth): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. num_layers_before_predictor: Number of the additional conv layers before the predictor. min_depth: Minimum feature depth prior to predicting box encodings and class predictions. max_depth: Maximum feature depth prior to predicting box encodings and class predictions. If max_depth is set to 0, no additional feature map will be inserted before location and class predictions. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._other_heads = other_heads self._conv_hyperparams_fn = conv_hyperparams_fn self._min_depth = min_depth self._max_depth = max_depth self._num_layers_before_predictor = num_layers_before_predictor @property def num_classes(self): return self._num_classes def _predict(self, image_features, num_predictions_per_location_list): """Computes encoded object locations and corresponding confidences. 
Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing features for a batch of images. num_predictions_per_location_list: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Returns: box_encodings: A list of float tensors of shape [batch_size, num_anchors_i, q, code_size] representing the location of the objects, where q is 1 or the number of classes. Each entry in the list corresponds to a feature map in the input `image_features` list. class_predictions_with_background: A list of float tensors of shape [batch_size, num_anchors_i, num_classes + 1] representing the class predictions for the proposals. Each entry in the list corresponds to a feature map in the input `image_features` list. """ predictions = { BOX_ENCODINGS: [], CLASS_PREDICTIONS_WITH_BACKGROUND: [], } for head_name in self._other_heads.keys(): predictions[head_name] = [] # TODO(rathodv): Come up with a better way to generate scope names # in box predictor once we have time to retrain all models in the zoo. # The following lines create scope names to be backwards compatible with the # existing checkpoints. box_predictor_scopes = [_NoopVariableScope()] if len(image_features) > 1: box_predictor_scopes = [ tf.variable_scope('BoxPredictor_{}'.format(i)) for i in range(len(image_features)) ] for (image_feature, num_predictions_per_location, box_predictor_scope) in zip( image_features, num_predictions_per_location_list, box_predictor_scopes): net = image_feature with box_predictor_scope: with slim.arg_scope(self._conv_hyperparams_fn()): with slim.arg_scope([slim.dropout], is_training=self._is_training): # Add additional conv layers before the class predictor. features_depth = static_shape.get_depth(image_feature.get_shape()) depth = max(min(features_depth, self._max_depth), self._min_depth) tf.logging.info('depth of additional conv before box predictor: {}'. format(depth)) if depth > 0 and self._num_layers_before_predictor > 0: for i in range(self._num_layers_before_predictor): net = slim.conv2d( net, depth, [1, 1], reuse=tf.AUTO_REUSE, scope='Conv2d_%d_1x1_%d' % (i, depth)) sorted_keys = sorted(self._other_heads.keys()) sorted_keys.append(BOX_ENCODINGS) sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) for head_name in sorted_keys: if head_name == BOX_ENCODINGS: head_obj = self._box_prediction_head elif head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: head_obj = self._class_prediction_head else: head_obj = self._other_heads[head_name] prediction = head_obj.predict( features=net, num_predictions_per_location=num_predictions_per_location) predictions[head_name].append(prediction) return predictions # TODO(rathodv): Replace with slim.arg_scope_func_key once its available # externally. def _arg_scope_func_key(op): """Returns a key that can be used to index arg_scope dictionary.""" return getattr(op, '_key_op', str(op)) # TODO(rathodv): Merge the implementation with ConvolutionalBoxPredictor above # since they are very similar. class WeightSharedConvolutionalBoxPredictor(box_predictor.BoxPredictor): """Convolutional Box Predictor with weight sharing. Defines the box predictor as defined in https://arxiv.org/abs/1708.02002. This class differs from ConvolutionalBoxPredictor in that it shares weights and biases while predicting from different feature maps. However, batch_norm parameters are not shared because the statistics of the activations vary among the different feature maps. 
Also note that separate multi-layer towers are constructed for the box encoding and class predictors respectively. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, other_heads, conv_hyperparams_fn, depth, num_layers_before_predictor, kernel_size=3, apply_batch_norm=False, share_prediction_tower=False, use_depthwise=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes. class_prediction_head: The head that predicts the classes. other_heads: A dictionary mapping head names to convolutional head classes. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. depth: depth of conv layers. num_layers_before_predictor: Number of the additional conv layers before the predictor. kernel_size: Size of final convolution kernel. apply_batch_norm: Whether to apply batch normalization to conv layers in this predictor. share_prediction_tower: Whether to share the multi-layer tower between box prediction and class prediction heads. use_depthwise: Whether to use depthwise separable conv2d instead of regular conv2d. """ super(WeightSharedConvolutionalBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._other_heads = other_heads self._conv_hyperparams_fn = conv_hyperparams_fn self._depth = depth self._num_layers_before_predictor = num_layers_before_predictor self._kernel_size = kernel_size self._apply_batch_norm = apply_batch_norm self._share_prediction_tower = share_prediction_tower self._use_depthwise = use_depthwise @property def num_classes(self): return self._num_classes def _insert_additional_projection_layer(self, image_feature, inserted_layer_counter, target_channel): if inserted_layer_counter < 0: return image_feature, inserted_layer_counter image_feature = slim.conv2d( image_feature, target_channel, [1, 1], stride=1, padding='SAME', activation_fn=None, normalizer_fn=(tf.identity if self._apply_batch_norm else None), scope='ProjectionLayer/conv2d_{}'.format( inserted_layer_counter)) if self._apply_batch_norm: image_feature = slim.batch_norm( image_feature, scope='ProjectionLayer/conv2d_{}/BatchNorm'.format( inserted_layer_counter)) inserted_layer_counter += 1 return image_feature, inserted_layer_counter def _compute_base_tower(self, tower_name_scope, image_feature, feature_index, has_different_feature_channels, target_channel, inserted_layer_counter): net = image_feature for i in range(self._num_layers_before_predictor): if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d net = conv_op( net, self._depth, [self._kernel_size, self._kernel_size], stride=1, padding='SAME', activation_fn=None, normalizer_fn=(tf.identity if self._apply_batch_norm else None), scope='{}/conv2d_{}'.format(tower_name_scope, i)) if self._apply_batch_norm: net = slim.batch_norm( net, scope='{}/conv2d_{}/BatchNorm/feature_{}'. 
            format(tower_name_scope, i, feature_index))
      net = tf.nn.relu6(net)
    return net

  def _predict_head(self, head_name, head_obj, image_feature,
                    box_tower_feature, feature_index,
                    has_different_feature_channels, target_channel,
                    inserted_layer_counter,
                    num_predictions_per_location):
    if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
      tower_name_scope = 'ClassPredictionTower'
    else:
      raise ValueError('Unknown head')
    if self._share_prediction_tower:
      head_tower_feature = box_tower_feature
    else:
      head_tower_feature = self._compute_base_tower(
          tower_name_scope=tower_name_scope,
          image_feature=image_feature,
          feature_index=feature_index,
          has_different_feature_channels=has_different_feature_channels,
          target_channel=target_channel,
          inserted_layer_counter=inserted_layer_counter)
    return head_obj.predict(
        features=head_tower_feature,
        num_predictions_per_location=num_predictions_per_location)

  def _predict(self, image_features, num_predictions_per_location_list):
    """Computes encoded object locations and corresponding confidences.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels] containing features for a batch of images. Note
        that when not all tensors in the list have the same number of
        channels, an additional projection layer will be added on top of the
        tensor to generate a feature map with a number of channels consistent
        with the majority.
      num_predictions_per_location_list: A list of integers representing the
        number of box predictions to be made per spatial location for each
        feature map. Note that all values must be the same since the weights
        are shared.

    Returns:
      A dictionary containing:
        box_encodings: A list of float tensors of shape
          [batch_size, num_anchors_i, code_size] representing the location of
          the objects. Each entry in the list corresponds to a feature map in
          the input `image_features` list.
        class_predictions_with_background: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes + 1] representing the class
          predictions for the proposals. Each entry in the list corresponds to
          a feature map in the input `image_features` list.
        (optional) mask_predictions: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes, mask_height, mask_width].

    Raises:
      ValueError: If the image feature maps do not have the same number of
        channels or if the number of predictions per location differs between
        the feature maps.
    """
    if len(set(num_predictions_per_location_list)) > 1:
      raise ValueError('num predictions per location must be same for all '
                       'feature maps, found: {}'.format(
                           num_predictions_per_location_list))
    feature_channels = [
        image_feature.shape[3].value for image_feature in image_features
    ]
    has_different_feature_channels = len(set(feature_channels)) > 1
    if has_different_feature_channels:
      inserted_layer_counter = 0
      target_channel = max(set(feature_channels), key=feature_channels.count)
      tf.logging.info('Not all feature maps have the same number of '
                      'channels, found: {}, adding projection layers '
                      'to bring all feature maps to uniform channels '
                      'of {}'.format(feature_channels, target_channel))
    else:
      # Placeholder variables if has_different_feature_channels is False.
target_channel = -1 inserted_layer_counter = -1 predictions = { BOX_ENCODINGS: [], CLASS_PREDICTIONS_WITH_BACKGROUND: [], } for head_name in self._other_heads.keys(): predictions[head_name] = [] for feature_index, (image_feature, num_predictions_per_location) in enumerate( zip(image_features, num_predictions_per_location_list)): with tf.variable_scope('WeightSharedConvolutionalBoxPredictor', reuse=tf.AUTO_REUSE): with slim.arg_scope(self._conv_hyperparams_fn()): (image_feature, inserted_layer_counter) = self._insert_additional_projection_layer( image_feature, inserted_layer_counter, target_channel) if self._share_prediction_tower: box_tower_scope = 'PredictionTower' else: box_tower_scope = 'BoxPredictionTower' box_tower_feature = self._compute_base_tower( tower_name_scope=box_tower_scope, image_feature=image_feature, feature_index=feature_index, has_different_feature_channels=has_different_feature_channels, target_channel=target_channel, inserted_layer_counter=inserted_layer_counter) box_encodings = self._box_prediction_head.predict( features=box_tower_feature, num_predictions_per_location=num_predictions_per_location) predictions[BOX_ENCODINGS].append(box_encodings) sorted_keys = sorted(self._other_heads.keys()) sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND) for head_name in sorted_keys: if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND: head_obj = self._class_prediction_head else: head_obj = self._other_heads[head_name] prediction = self._predict_head( head_name=head_name, head_obj=head_obj, image_feature=image_feature, box_tower_feature=box_tower_feature, feature_index=feature_index, has_different_feature_channels=has_different_feature_channels, target_channel=target_channel, inserted_layer_counter=inserted_layer_counter, num_predictions_per_location=num_predictions_per_location) predictions[head_name].append(prediction) return predictions
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/convolutional_box_predictor.py
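The projection-layer trigger in WeightSharedConvolutionalBoxPredictor._predict above picks the most common channel count as the target; a standalone illustration of that one line, using hypothetical channel counts:

feature_channels = [256, 256, 512, 256, 128]
has_different_feature_channels = len(set(feature_channels)) > 1  # True
# Majority vote over channel counts; ties resolve arbitrarily via max().
target_channel = max(set(feature_channels), key=feature_channels.count)
print(target_channel)  # 256: the 512- and 128-channel maps get 1x1 projections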
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class Head. Contains Class prediction head classes for different meta architectures. All the class prediction heads have a predict function that receives the `features` as the first argument and returns class predictions with background. """ import tensorflow as tf from object_detection.predictors.heads import head class ConvolutionalClassHead(head.KerasHead): """Convolutional class prediction head.""" def __init__(self, is_training, num_class_slots, use_dropout, dropout_keep_prob, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, class_prediction_bias_init=0.0, use_depthwise=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalClassHead, self).__init__(name=name) self._is_training = is_training self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_depthwise = use_depthwise self._num_class_slots = num_class_slots self._class_predictor_layers = [] if self._use_dropout: self._class_predictor_layers.append( # The Dropout layer's `training` parameter for the call method must # be set implicitly by the Keras set_learning_phase. The object # detection training code takes care of this. 
tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: self._class_predictor_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='ClassPredictor_depthwise', **conv_hyperparams.params())) self._class_predictor_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='ClassPredictor_depthwise_batchnorm')) self._class_predictor_layers.append( conv_hyperparams.build_activation_layer( name='ClassPredictor_depthwise_activation')) self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [1, 1], name='ClassPredictor', **conv_hyperparams.params(use_bias=True))) else: self._class_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], padding='SAME', name='ClassPredictor', bias_initializer=tf.constant_initializer( self._class_prediction_bias_init), **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: class_predictions_with_background: A float tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. """ # Add a slot for the background class. class_predictions_with_background = features for layer in self._class_predictor_layers: class_predictions_with_background = layer( class_predictions_with_background) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keras_class_head.py
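The final reshape in ConvolutionalClassHead._predict above collapses the spatial grid and the per-location predictions into a single anchor axis; a self-contained sketch with hypothetical sizes:

import tensorflow as tf

batch, h, w = 4, 8, 8
num_predictions_per_location, num_class_slots = 5, 7
# The class predictor conv emits one score per (location, anchor, class slot).
conv_out = tf.zeros(
    [batch, h, w, num_predictions_per_location * num_class_slots])
scores = tf.reshape(conv_out, [batch, -1, num_class_slots])
# scores.shape == [4, 8 * 8 * 5, 7] == [4, 320, 7]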
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.predictors.heads.keras_box_head."""
import tensorflow as tf

from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_box_head
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case


class ConvolutionalKerasBoxHeadTest(test_case.TestCase):

  def _build_conv_hyperparams(self):
    conv_hyperparams = hyperparams_pb2.Hyperparams()
    conv_hyperparams_text_proto = """
      activation: NONE
      regularizer {
        l2_regularizer {
        }
      }
      initializer {
        truncated_normal_initializer {
        }
      }
    """
    text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
    return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)

  def test_prediction_size_depthwise_false(self):
    conv_hyperparams = self._build_conv_hyperparams()
    box_prediction_head = keras_box_head.ConvolutionalBoxHead(
        is_training=True,
        box_code_size=4,
        kernel_size=3,
        conv_hyperparams=conv_hyperparams,
        freeze_batchnorm=False,
        num_predictions_per_location=1,
        use_depthwise=False)
    image_feature = tf.random_uniform(
        [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)
    box_encodings = box_prediction_head(image_feature)
    self.assertAllEqual([64, 323, 1, 4],
                        box_encodings.get_shape().as_list())

# TODO(kaftan): Remove conditional after CMLE moves to TF 1.10

if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keras_box_head_test.py
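The 323 in the assertion above is just the anchor count implied by the feature map size and the predictions per location:

height, width, num_predictions_per_location = 17, 19, 1
num_anchors = height * width * num_predictions_per_location  # 323
# Hence box_encodings: [64, 323, 1, 4] for batch size 64 and box_code_size 4.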
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mask Head. Contains Mask prediction head classes for different meta architectures. All the mask prediction heads have a predict function that receives the `features` as the first argument and returns `mask_predictions`. """ import math import tensorflow as tf from object_detection.predictors.heads import head from object_detection.utils import ops slim = tf.contrib.slim class MaskRCNNMaskHead(head.Head): """Mask RCNN mask prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, num_classes, conv_hyperparams_fn=None, mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False, convolve_then_upsample=False): """Constructor. Args: num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. mask_height: Desired output mask height. The default value is 14. mask_width: Desired output mask width. The default value is 14. mask_prediction_num_conv_layers: Number of convolution layers applied to the image_features in mask prediction branch. mask_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the mask prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. convolve_then_upsample: Whether to apply convolutions on mask features before upsampling using nearest neighbor resizing. Otherwise, mask features are resized to [`mask_height`, `mask_width`] using bilinear resizing before applying convolutions. Raises: ValueError: conv_hyperparams_fn is None. """ super(MaskRCNNMaskHead, self).__init__() self._num_classes = num_classes self._conv_hyperparams_fn = conv_hyperparams_fn self._mask_height = mask_height self._mask_width = mask_width self._mask_prediction_num_conv_layers = mask_prediction_num_conv_layers self._mask_prediction_conv_depth = mask_prediction_conv_depth self._masks_are_class_agnostic = masks_are_class_agnostic self._convolve_then_upsample = convolve_then_upsample if conv_hyperparams_fn is None: raise ValueError('conv_hyperparams_fn is None.') def _get_mask_predictor_conv_depth(self, num_feature_channels, num_classes, class_weight=3.0, feature_weight=2.0): """Computes the depth of the mask predictor convolutions. 
Computes the depth of the mask predictor convolutions given feature channels and number of classes by performing a weighted average of the two in log space to compute the number of convolution channels. The weights that are used for computing the weighted average do not need to sum to 1. Args: num_feature_channels: An integer containing the number of feature channels. num_classes: An integer containing the number of classes. class_weight: Class weight used in computing the weighted average. feature_weight: Feature weight used in computing the weighted average. Returns: An integer containing the number of convolution channels used by mask predictor. """ num_feature_channels_log = math.log(float(num_feature_channels), 2.0) num_classes_log = math.log(float(num_classes), 2.0) weighted_num_feature_channels_log = ( num_feature_channels_log * feature_weight) weighted_num_classes_log = num_classes_log * class_weight total_weight = feature_weight + class_weight num_conv_channels_log = round( (weighted_num_feature_channels_log + weighted_num_classes_log) / total_weight) return int(math.pow(2.0, num_conv_channels_log)) def predict(self, features, num_predictions_per_location=1): """Performs mask prediction. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: instance_masks: A float tensor of shape [batch_size, 1, num_classes, mask_height, mask_width]. Raises: ValueError: If num_predictions_per_location is not 1. """ if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') num_conv_channels = self._mask_prediction_conv_depth if num_conv_channels == 0: num_feature_channels = features.get_shape().as_list()[3] num_conv_channels = self._get_mask_predictor_conv_depth( num_feature_channels, self._num_classes) with slim.arg_scope(self._conv_hyperparams_fn()): if not self._convolve_then_upsample: features = tf.image.resize_bilinear( features, [self._mask_height, self._mask_width], align_corners=True) for _ in range(self._mask_prediction_num_conv_layers - 1): features = slim.conv2d( features, num_outputs=num_conv_channels, kernel_size=[3, 3]) if self._convolve_then_upsample: # Replace Transposed Convolution with a Nearest Neighbor upsampling step # followed by 3x3 convolution. height_scale = self._mask_height / features.shape[1].value width_scale = self._mask_width / features.shape[2].value features = ops.nearest_neighbor_upsampling( features, height_scale=height_scale, width_scale=width_scale) features = slim.conv2d( features, num_outputs=num_conv_channels, kernel_size=[3, 3]) num_masks = 1 if self._masks_are_class_agnostic else self._num_classes mask_predictions = slim.conv2d( features, num_outputs=num_masks, activation_fn=None, normalizer_fn=None, kernel_size=[3, 3]) return tf.expand_dims( tf.transpose(mask_predictions, perm=[0, 3, 1, 2]), axis=1, name='MaskPredictor') class ConvolutionalMaskHead(head.Head): """Convolutional class prediction head.""" def __init__(self, is_training, num_classes, use_dropout, dropout_keep_prob, kernel_size, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: Number of classes. use_dropout: Option to use dropout or not. 
        Note that a single dropout op is applied here prior to both box and
        class predictions, which stands in contrast to the
        ConvolutionalBoxPredictor.
      dropout_keep_prob: Keep probability for dropout.
        This is only used if use_dropout is True.
      kernel_size: Size of final convolution kernel. If the
        spatial resolution of the feature map is smaller than the kernel size,
        then the kernel size is automatically set to be
        min(feature_width, feature_height).
      use_depthwise: Whether to use depthwise convolutions for prediction
        steps. Default is False.
      mask_height: Desired output mask height. The default value is 7.
      mask_width: Desired output mask width. The default value is 7.
      masks_are_class_agnostic: Boolean determining if the mask-head is
        class-agnostic or not.
    """
    super(ConvolutionalMaskHead, self).__init__()
    self._is_training = is_training
    self._num_classes = num_classes
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._kernel_size = kernel_size
    self._use_depthwise = use_depthwise
    self._mask_height = mask_height
    self._mask_width = mask_width
    self._masks_are_class_agnostic = masks_are_class_agnostic

  def predict(self, features, num_predictions_per_location):
    """Predicts masks.

    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of box predictions to be made per
        spatial location.

    Returns:
      mask_predictions: A float tensor of shape
        [batch_size, num_anchors, num_masks, mask_height, mask_width]
        representing the mask predictions for the proposals.
    """
    image_feature = features
    # Predict one mask per class, or a single class-agnostic mask.
    if self._masks_are_class_agnostic:
      num_masks = 1
    else:
      num_masks = self._num_classes
    num_mask_channels = num_masks * self._mask_height * self._mask_width
    net = image_feature
    if self._use_dropout:
      net = slim.dropout(net, keep_prob=self._dropout_keep_prob)
    if self._use_depthwise:
      mask_predictions = slim.separable_conv2d(
          net, None, [self._kernel_size, self._kernel_size],
          padding='SAME', depth_multiplier=1, stride=1, rate=1,
          scope='MaskPredictor_depthwise')
      mask_predictions = slim.conv2d(
          mask_predictions,
          num_predictions_per_location * num_mask_channels,
          [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          normalizer_params=None,
          scope='MaskPredictor')
    else:
      mask_predictions = slim.conv2d(
          net,
          num_predictions_per_location * num_mask_channels,
          [self._kernel_size, self._kernel_size],
          activation_fn=None,
          normalizer_fn=None,
          normalizer_params=None,
          scope='MaskPredictor')
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      batch_size = tf.shape(features)[0]
    mask_predictions = tf.reshape(
        mask_predictions,
        [batch_size, -1, num_masks, self._mask_height, self._mask_width])
    return mask_predictions


# TODO(alirezafathi): See if possible to unify Weight Shared with regular
# convolutional mask head.
class WeightSharedConvolutionalMaskHead(head.Head):
  """Weight shared convolutional mask prediction head."""

  def __init__(self,
               num_classes,
               kernel_size=3,
               use_dropout=False,
               dropout_keep_prob=0.8,
               mask_height=7,
               mask_width=7,
               masks_are_class_agnostic=False):
    """Constructor.

    Args:
      num_classes: number of classes.  Note that num_classes *does not*
        include the background category, so if groundtruth labels take values
        in {0, 1, .., K-1}, num_classes=K (and not K+1, even
        though the assigned classification targets can range from {0,... K}).
      kernel_size: Size of final convolution kernel.
      use_dropout: Whether to apply dropout to class prediction head.
      dropout_keep_prob: Probability of keeping activations.
      mask_height: Desired output mask height. The default value is 7.
      mask_width: Desired output mask width. The default value is 7.
      masks_are_class_agnostic: Boolean determining if the mask-head is
        class-agnostic or not.
    """
    super(WeightSharedConvolutionalMaskHead, self).__init__()
    self._num_classes = num_classes
    self._kernel_size = kernel_size
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._mask_height = mask_height
    self._mask_width = mask_width
    self._masks_are_class_agnostic = masks_are_class_agnostic

  def predict(self, features, num_predictions_per_location):
    """Predicts masks.

    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of box predictions to be made per
        spatial location.

    Returns:
      mask_predictions: A tensor of shape
        [batch_size, num_anchors, num_classes, mask_height, mask_width]
        representing the mask predictions for the proposals.
    """
    mask_predictions_net = features
    if self._masks_are_class_agnostic:
      num_masks = 1
    else:
      num_masks = self._num_classes
    num_mask_channels = num_masks * self._mask_height * self._mask_width
    if self._use_dropout:
      mask_predictions_net = slim.dropout(
          mask_predictions_net, keep_prob=self._dropout_keep_prob)
    mask_predictions = slim.conv2d(
        mask_predictions_net,
        num_predictions_per_location * num_mask_channels,
        [self._kernel_size, self._kernel_size],
        activation_fn=None,
        stride=1,
        padding='SAME',
        normalizer_fn=None,
        scope='MaskPredictor')
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      batch_size = tf.shape(features)[0]
    mask_predictions = tf.reshape(
        mask_predictions,
        [batch_size, -1, num_masks, self._mask_height, self._mask_width])
    return mask_predictions
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/mask_head.py
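A worked example of the weighted log-space average in _get_mask_predictor_conv_depth above, with hypothetical inputs of 1024 feature channels and 90 classes:

import math

num_feature_channels, num_classes = 1024, 90
feature_weight, class_weight = 2.0, 3.0
# Weighted average of log2(1024) = 10 and log2(90) ~= 6.49, weights 2:3.
weighted = (math.log(num_feature_channels, 2.0) * feature_weight +
            math.log(num_classes, 2.0) * class_weight)
depth = int(math.pow(2.0, round(weighted / (feature_weight + class_weight))))
print(depth)  # 256: (20 + 19.48) / 5 ~= 7.90 rounds to 8, and 2**8 = 256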
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.box_head.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import box_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class MaskRCNNBoxHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): box_prediction_head = box_head.MaskRCNNBoxHead( is_training=False, num_classes=20, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=True, dropout_keep_prob=0.5, box_code_size=4, share_box_across_classes=False) roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = box_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 20, 4], prediction.get_shape().as_list()) class ConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): box_prediction_head = box_head.ConvolutionalBoxHead( is_training=True, box_code_size=4, kernel_size=3) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 1, 4], box_encodings.get_shape().as_list()) class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): box_prediction_head = box_head.WeightSharedConvolutionalBoxHead( box_code_size=4) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, 
maxval=10.0, dtype=tf.float32) box_encodings = box_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 4], box_encodings.get_shape().as_list()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/box_head_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Box Head. Contains Box prediction head classes for different meta architectures. All the box prediction heads have a predict function that receives the `features` as the first argument and returns `box_encodings`. """ import functools import tensorflow as tf from object_detection.predictors.heads import head slim = tf.contrib.slim class MaskRCNNBoxHead(head.Head): """Box prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_classes, fc_hyperparams_fn, use_dropout, dropout_keep_prob, box_code_size, share_box_across_classes=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. box_code_size: Size of encoding for each box. share_box_across_classes: Whether to share boxes across classes rather than use a different box for each class. """ super(MaskRCNNBoxHead, self).__init__() self._is_training = is_training self._num_classes = num_classes self._fc_hyperparams_fn = fc_hyperparams_fn self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._box_code_size = box_code_size self._share_box_across_classes = share_box_across_classes def predict(self, features, num_predictions_per_location=1): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: box_encodings: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. Raises: ValueError: If num_predictions_per_location is not 1. 
""" if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') flattened_roi_pooled_features = slim.flatten( spatial_averaged_roi_pooled_features) if self._use_dropout: flattened_roi_pooled_features = slim.dropout( flattened_roi_pooled_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training) number_of_boxes = 1 if not self._share_box_across_classes: number_of_boxes = self._num_classes with slim.arg_scope(self._fc_hyperparams_fn()): box_encodings = slim.fully_connected( flattened_roi_pooled_features, number_of_boxes * self._box_code_size, activation_fn=None, scope='BoxEncodingPredictor') box_encodings = tf.reshape(box_encodings, [-1, 1, number_of_boxes, self._box_code_size]) return box_encodings class ConvolutionalBoxHead(head.Head): """Convolutional box prediction head.""" def __init__(self, is_training, box_code_size, kernel_size, use_depthwise=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalBoxHead, self).__init__() self._is_training = is_training self._box_code_size = box_code_size self._kernel_size = kernel_size self._use_depthwise = use_depthwise def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. Returns: box_encodings: A float tensors of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. """ net = features if self._use_depthwise: box_encodings = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope='BoxEncodingPredictor_depthwise') box_encodings = slim.conv2d( box_encodings, num_predictions_per_location * self._box_code_size, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='BoxEncodingPredictor') else: box_encodings = slim.conv2d( net, num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='BoxEncodingPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] box_encodings = tf.reshape(box_encodings, [batch_size, -1, 1, self._box_code_size]) return box_encodings # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional box head. class WeightSharedConvolutionalBoxHead(head.Head): """Weight shared convolutional box prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, box_code_size, kernel_size=3, use_depthwise=False, box_encodings_clip_range=None): """Constructor. Args: box_code_size: Size of encoding for each box. 
kernel_size: Size of final convolution kernel. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. box_encodings_clip_range: Min and max values for clipping box_encodings. """ super(WeightSharedConvolutionalBoxHead, self).__init__() self._box_code_size = box_code_size self._kernel_size = kernel_size self._use_depthwise = use_depthwise self._box_encodings_clip_range = box_encodings_clip_range def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, code_size] representing the location of the objects. """ box_encodings_net = features if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d box_encodings = conv_op( box_encodings_net, num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, scope='BoxPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] # Clipping the box encodings to make the inference graph TPU friendly. if self._box_encodings_clip_range is not None: box_encodings = tf.clip_by_value( box_encodings, self._box_encodings_clip_range.min, self._box_encodings_clip_range.max) box_encodings = tf.reshape(box_encodings, [batch_size, -1, self._box_code_size]) return box_encodings
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/box_head.py
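A minimal usage sketch for the slim box heads above, mirroring the shapes exercised in box_head_test.py earlier in this section. It assumes a TF 1.x graph context; names like image_feature are illustrative, not part of the library.

import tensorflow as tf

from object_detection.predictors.heads import box_head

# A [64, 17, 19, 1024] feature map with one prediction per location yields
# 17 * 19 = 323 anchors after the reshape inside predict().
image_feature = tf.random_uniform([64, 17, 19, 1024])
conv_head = box_head.ConvolutionalBoxHead(
    is_training=True, box_code_size=4, kernel_size=3)
box_encodings = conv_head.predict(
    features=image_feature, num_predictions_per_location=1)
# box_encodings: [64, 323, 1, 4] = [batch_size, num_anchors, q, code_size].

shared_head = box_head.WeightSharedConvolutionalBoxHead(box_code_size=4)
shared_encodings = shared_head.predict(
    features=image_feature, num_predictions_per_location=1)
# shared_encodings: [64, 323, 4]; the weight-shared head has no q axis.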
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras Mask Heads. Contains Mask prediction head classes for different meta architectures. All the mask prediction heads have a predict function that receives the `features` as the first argument and returns `mask_predictions`. """ import tensorflow as tf from object_detection.predictors.heads import head class ConvolutionalMaskHead(head.KerasHead): """Convolutional mask prediction head.""" def __init__(self, is_training, num_classes, use_dropout, dropout_keep_prob, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: Number of classes. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to the mask predictions. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. """ super(ConvolutionalMaskHead, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic self._mask_predictor_layers = [] # Class-agnostic heads predict a single mask per box; otherwise one per class.
if self._masks_are_class_agnostic: self._num_masks = 1 else: self._num_masks = self._num_classes num_mask_channels = self._num_masks * self._mask_height * self._mask_width if self._use_dropout: self._mask_predictor_layers.append( # The Dropout layer's `training` parameter for the call method must # be set implicitly by the Keras set_learning_phase. The object # detection training code takes care of this. tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: self._mask_predictor_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='MaskPredictor_depthwise', **conv_hyperparams.params())) self._mask_predictor_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='MaskPredictor_depthwise_batchnorm')) self._mask_predictor_layers.append( conv_hyperparams.build_activation_layer( name='MaskPredictor_depthwise_activation')) self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [1, 1], name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) else: self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], padding='SAME', name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts masks. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: mask_predictions: A float tensor of shape [batch_size, num_anchors, num_masks, mask_height, mask_width] representing the mask predictions for the proposals. """ mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) return mask_predictions
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keras_mask_head.py
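Because ConvolutionalMaskHead subclasses KerasHead (a tf.keras.Model), it is invoked by calling the object directly rather than via predict(). A minimal sketch, reusing the hyperparams fixture from keras_mask_head_test.py later in this section:

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_mask_head
from object_detection.protos import hyperparams_pb2

proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: NONE
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", proto)
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(proto)

mask_head_layer = keras_mask_head.ConvolutionalMaskHead(
    is_training=True, num_classes=20, use_dropout=True,
    dropout_keep_prob=0.5, kernel_size=3,
    conv_hyperparams=conv_hyperparams, freeze_batchnorm=False,
    num_predictions_per_location=1, mask_height=7, mask_width=7)
image_feature = tf.random_uniform([64, 17, 19, 1024])
mask_predictions = mask_head_layer(image_feature)
# mask_predictions: [64, 323, 20, 7, 7] =
# [batch_size, num_anchors, num_masks, mask_height, mask_width].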
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keypoint Head. Contains Keypoint prediction head classes for different meta architectures. All the keypoint prediction heads have a predict function that receives the `features` as the first argument and returns `keypoint_predictions`. Keypoints could be used to represent the human body joint locations as in the Mask RCNN paper. Or they could be used to represent different part locations of objects. """ import tensorflow as tf from object_detection.predictors.heads import head slim = tf.contrib.slim class MaskRCNNKeypointHead(head.Head): """Mask RCNN keypoint prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, num_keypoints=17, conv_hyperparams_fn=None, keypoint_heatmap_height=56, keypoint_heatmap_width=56, keypoint_prediction_num_conv_layers=8, keypoint_prediction_conv_depth=512): """Constructor. Args: num_keypoints: (int scalar) number of keypoints. conv_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for convolution ops. keypoint_heatmap_height: Desired output heatmap height. The default value is 56. keypoint_heatmap_width: Desired output heatmap width. The default value is 56. keypoint_prediction_num_conv_layers: Number of convolution layers applied to the image_features in the keypoint prediction branch. keypoint_prediction_conv_depth: The depth for the first conv2d_transpose op applied to the image_features in the keypoint prediction branch. If set to 0, the depth of the convolution layers will be automatically chosen based on the number of object classes and the number of channels in the image features. """ super(MaskRCNNKeypointHead, self).__init__() self._num_keypoints = num_keypoints self._conv_hyperparams_fn = conv_hyperparams_fn self._keypoint_heatmap_height = keypoint_heatmap_height self._keypoint_heatmap_width = keypoint_heatmap_width self._keypoint_prediction_num_conv_layers = ( keypoint_prediction_num_conv_layers) self._keypoint_prediction_conv_depth = keypoint_prediction_conv_depth def predict(self, features, num_predictions_per_location=1): """Performs keypoint prediction. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: keypoint_heatmaps: A float tensor of shape [batch_size, 1, num_keypoints, heatmap_height, heatmap_width]. Raises: ValueError: If num_predictions_per_location is not 1.
""" if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') with slim.arg_scope(self._conv_hyperparams_fn()): net = slim.conv2d( features, self._keypoint_prediction_conv_depth, [3, 3], scope='conv_1') for i in range(1, self._keypoint_prediction_num_conv_layers): net = slim.conv2d( net, self._keypoint_prediction_conv_depth, [3, 3], scope='conv_%d' % (i + 1)) net = slim.conv2d_transpose( net, self._num_keypoints, [2, 2], scope='deconv1') heatmaps_mask = tf.image.resize_bilinear( net, [self._keypoint_heatmap_height, self._keypoint_heatmap_width], align_corners=True, name='upsample') return tf.expand_dims( tf.transpose(heatmaps_mask, perm=[0, 3, 1, 2]), axis=1, name='KeypointPredictor')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keypoint_head.py
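A short sketch of driving the keypoint head, mirroring keypoint_head_test.py later in this section (which, like this sketch, builds an FC hyperparams fixture even though the head runs conv ops):

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keypoint_head
from object_detection.protos import hyperparams_pb2

hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: NONE
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", hyperparams)
hyperparams.op = hyperparams_pb2.Hyperparams.FC
conv_hyperparams_fn = hyperparams_builder.build(hyperparams, is_training=True)

keypoint_prediction_head = keypoint_head.MaskRCNNKeypointHead(
    conv_hyperparams_fn=conv_hyperparams_fn)
roi_pooled_features = tf.random_uniform([64, 14, 14, 1024])
heatmaps = keypoint_prediction_head.predict(
    features=roi_pooled_features, num_predictions_per_location=1)
# heatmaps: [64, 1, 17, 56, 56] =
# [batch_size, 1, num_keypoints, heatmap_height, heatmap_width].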
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Box Head. Contains Box prediction head classes for different meta architectures. All the box prediction heads have a _predict function that receives the `features` as the first argument and returns `box_encodings`. """ import tensorflow as tf from object_detection.predictors.heads import head class ConvolutionalBoxHead(head.KerasHead): """Convolutional box prediction head.""" def __init__(self, is_training, box_code_size, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=True, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. box_code_size: Size of encoding for each box. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is True. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name.
""" super(ConvolutionalBoxHead, self).__init__(name=name) self._is_training = is_training self._box_code_size = box_code_size self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._box_encoder_layers = [] if self._use_depthwise: self._box_encoder_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='BoxEncodingPredictor_depthwise', **conv_hyperparams.params())) self._box_encoder_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='BoxEncodingPredictor_depthwise_batchnorm')) self._box_encoder_layers.append( conv_hyperparams.build_activation_layer( name='BoxEncodingPredictor_depthwise_activation')) self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [1, 1], name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True))) else: self._box_encoder_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * self._box_code_size, [self._kernel_size, self._kernel_size], padding='SAME', name='BoxEncodingPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: box_encodings: A float tensor of shape [batch_size, num_anchors, q, code_size] representing the location of the objects, where q is 1 or the number of classes. """ box_encodings = features for layer in self._box_encoder_layers: box_encodings = layer(box_encodings) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] box_encodings = tf.reshape(box_encodings, [batch_size, -1, 1, self._box_code_size]) return box_encodings
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keras_box_head.py
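Illustrative only: there is no keras_box_head test in this section, so the sketch below borrows the hyperparams fixture used by the other Keras head tests here; use_depthwise is passed explicitly because this copy of the file defaults it to True.

import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import hyperparams_builder
from object_detection.predictors.heads import keras_box_head
from object_detection.protos import hyperparams_pb2

proto = hyperparams_pb2.Hyperparams()
text_format.Merge("""
  activation: NONE
  regularizer { l2_regularizer { } }
  initializer { truncated_normal_initializer { } }
""", proto)
conv_hyperparams = hyperparams_builder.KerasLayerHyperparams(proto)

box_head_layer = keras_box_head.ConvolutionalBoxHead(
    is_training=True, box_code_size=4, kernel_size=3,
    num_predictions_per_location=1, conv_hyperparams=conv_hyperparams,
    freeze_batchnorm=False, use_depthwise=False)
image_feature = tf.random_uniform([64, 17, 19, 1024])
box_encodings = box_head_layer(image_feature)
# box_encodings: [64, 323, 1, 4], the same layout as the slim head's output.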
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.keypoint_head.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import keypoint_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class MaskRCNNKeypointHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): keypoint_prediction_head = keypoint_head.MaskRCNNKeypointHead( conv_hyperparams_fn=self._build_arg_scope_with_hyperparams()) roi_pooled_features = tf.random_uniform( [64, 14, 14, 1024], minval=-2.0, maxval=2.0, dtype=tf.float32) prediction = keypoint_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 17, 56, 56], prediction.get_shape().as_list()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keypoint_head_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.class_head.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import keras_class_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class ConvolutionalKerasClassPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() class_prediction_head = keras_class_head.ConvolutionalClassHead( is_training=True, num_class_slots=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=False) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head(image_feature,) self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list()) # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10 if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keras_class_head_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base head class. All the different kinds of prediction heads in different models will inherit from this class. What all head classes have in common is a `predict` function that receives `features` as its first argument. How to add a new prediction head to an existing meta architecture? For example, how can we add a `3d shape` prediction head to Mask RCNN? We have to take the following steps to add a new prediction head to an existing meta arch: (a) Add a class for predicting the head. This class should inherit from the `Head` class below and have a `predict` function that receives the features and predicts the output. The output is always a tf.float32 tensor. (b) Add the head to the meta architecture. For example, in the case of Mask RCNN, go to box_predictor_builder and put in the logic for adding the new head to the Mask RCNN box predictor. (c) Add the logic for computing the loss for the new head. (d) Add the necessary metrics for the new head. (e) (optional) Add visualization for the new head. """ from abc import abstractmethod import tensorflow as tf class Head(object): """Mask RCNN head base class.""" def __init__(self): """Constructor.""" pass @abstractmethod def predict(self, features, num_predictions_per_location): """Returns the head's predictions. Args: features: A float tensor of features. num_predictions_per_location: Int containing number of predictions per location. Returns: A tf.float32 tensor. """ pass class KerasHead(tf.keras.Model): """Keras head base class.""" def call(self, features): """The Keras model call will delegate to the `_predict` method.""" return self._predict(features) @abstractmethod def _predict(self, features): """Returns the head's predictions. Args: features: A float tensor of features. Returns: A tf.float32 tensor. """ pass
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/head.py
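A hypothetical sketch of step (a) from the module docstring above: a new slim-style head, here an invented Shape3DHead, that inherits from Head and emits a fixed-size shape encoding per anchor. The class name, scope name, and code size are made up for illustration; steps (b) through (e) (wiring into a box predictor, loss, metrics, visualization) live elsewhere.

import tensorflow as tf

from object_detection.predictors.heads import head

slim = tf.contrib.slim


class Shape3DHead(head.Head):
  """Hypothetical head predicting a 3d shape code per anchor."""

  def __init__(self, shape_code_size=6):
    super(Shape3DHead, self).__init__()
    self._shape_code_size = shape_code_size

  def predict(self, features, num_predictions_per_location):
    # Follows the same pattern as the convolutional heads in this section:
    # a linear conv, then a reshape to [batch_size, num_anchors, code_size].
    shape_encodings = slim.conv2d(
        features,
        num_predictions_per_location * self._shape_code_size,
        [3, 3], activation_fn=None, normalizer_fn=None,
        scope='Shape3DPredictor')
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      batch_size = tf.shape(features)[0]
    return tf.reshape(shape_encodings,
                      [batch_size, -1, self._shape_code_size])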
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.class_head.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import class_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class MaskRCNNClassHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): class_prediction_head = class_head.MaskRCNNClassHead( is_training=False, num_class_slots=20, fc_hyperparams_fn=self._build_arg_scope_with_hyperparams(), use_dropout=True, dropout_keep_prob=0.5) roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = class_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list()) class ConvolutionalClassPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): class_prediction_head = class_head.ConvolutionalClassHead( is_training=True, num_class_slots=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list()) class WeightSharedConvolutionalClassPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): class_prediction_head = ( class_head.WeightSharedConvolutionalClassHead(num_class_slots=20)) image_feature = 
tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) class_predictions = class_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/class_head_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.mask_head.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import keras_mask_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class ConvolutionalMaskPredictorTest(test_case.TestCase): def _build_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams) def test_prediction_size_use_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=False, mask_height=7, mask_width=7) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head(image_feature) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.get_shape().as_list()) # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10 def test_class_agnostic_prediction_size_use_depthwise_false(self): conv_hyperparams = self._build_conv_hyperparams() mask_prediction_head = keras_mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, conv_hyperparams=conv_hyperparams, freeze_batchnorm=False, num_predictions_per_location=1, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=True) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head(image_feature) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.get_shape().as_list()) # TODO(kaftan): Remove conditional after CMLE moves to TF 1.10 if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/keras_mask_head_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.heads.mask_head.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import hyperparams_builder from object_detection.predictors.heads import mask_head from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class MaskRCNNMaskHeadTest(test_case.TestCase): def _build_arg_scope_with_hyperparams(self, op_type=hyperparams_pb2.Hyperparams.FC): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): mask_prediction_head = mask_head.MaskRCNNMaskHead( num_classes=20, conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), mask_height=14, mask_width=14, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=False) roi_pooled_features = tf.random_uniform( [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = mask_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 20, 14, 14], prediction.get_shape().as_list()) def test_prediction_size_with_convolve_then_upsample(self): mask_prediction_head = mask_head.MaskRCNNMaskHead( num_classes=20, conv_hyperparams_fn=self._build_arg_scope_with_hyperparams(), mask_height=28, mask_width=28, mask_prediction_num_conv_layers=2, mask_prediction_conv_depth=256, masks_are_class_agnostic=True, convolve_then_upsample=True) roi_pooled_features = tf.random_uniform( [64, 14, 14, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) prediction = mask_prediction_head.predict( features=roi_pooled_features, num_predictions_per_location=1) self.assertAllEqual([64, 1, 1, 28, 28], prediction.get_shape().as_list()) class ConvolutionalMaskPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): mask_prediction_head = mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, mask_height=7, mask_width=7) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, 
num_predictions_per_location=1) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.get_shape().as_list()) def test_class_agnostic_prediction_size(self): mask_prediction_head = mask_head.ConvolutionalMaskHead( is_training=True, num_classes=20, use_dropout=True, dropout_keep_prob=0.5, kernel_size=3, mask_height=7, mask_width=7, masks_are_class_agnostic=True) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.get_shape().as_list()) class WeightSharedConvolutionalMaskPredictorTest(test_case.TestCase): def _build_arg_scope_with_hyperparams( self, op_type=hyperparams_pb2.Hyperparams.CONV): hyperparams = hyperparams_pb2.Hyperparams() hyperparams_text_proto = """ activation: NONE regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(hyperparams_text_proto, hyperparams) hyperparams.op = op_type return hyperparams_builder.build(hyperparams, is_training=True) def test_prediction_size(self): mask_prediction_head = ( mask_head.WeightSharedConvolutionalMaskHead( num_classes=20, mask_height=7, mask_width=7)) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 20, 7, 7], mask_predictions.get_shape().as_list()) def test_class_agnostic_prediction_size(self): mask_prediction_head = ( mask_head.WeightSharedConvolutionalMaskHead( num_classes=20, mask_height=7, mask_width=7, masks_are_class_agnostic=True)) image_feature = tf.random_uniform( [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32) mask_predictions = mask_prediction_head.predict( features=image_feature, num_predictions_per_location=1) self.assertAllEqual([64, 323, 1, 7, 7], mask_predictions.get_shape().as_list()) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/mask_head_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class Head. Contains Class prediction head classes for different meta architectures. All the class prediction heads have a predict function that receives the `features` as the first argument and returns class predictions with background. """ import functools import tensorflow as tf from object_detection.predictors.heads import head slim = tf.contrib.slim class MaskRCNNClassHead(head.Head): """Mask RCNN class prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_class_slots, fc_hyperparams_fn, use_dropout, dropout_keep_prob): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. """ super(MaskRCNNClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._fc_hyperparams_fn = fc_hyperparams_fn self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob def predict(self, features, num_predictions_per_location=1): """Predicts class scores. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: class_predictions_with_background: A float tensor of shape [batch_size, 1, num_class_slots] representing the class predictions for the proposals. Raises: ValueError: If num_predictions_per_location is not 1.
""" if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') flattened_roi_pooled_features = slim.flatten( spatial_averaged_roi_pooled_features) if self._use_dropout: flattened_roi_pooled_features = slim.dropout( flattened_roi_pooled_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training) with slim.arg_scope(self._fc_hyperparams_fn()): class_predictions_with_background = slim.fully_connected( flattened_roi_pooled_features, self._num_class_slots, activation_fn=None, scope='ClassPredictor') class_predictions_with_background = tf.reshape( class_predictions_with_background, [-1, 1, self._num_class_slots]) return class_predictions_with_background class ConvolutionalClassHead(head.Head): """Convolutional class prediction head.""" def __init__(self, is_training, num_class_slots, use_dropout, dropout_keep_prob, kernel_size, apply_sigmoid_to_scores=False, class_prediction_bias_init=0.0, use_depthwise=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). apply_sigmoid_to_scores: if True, apply the sigmoid on the output class_predictions. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._apply_sigmoid_to_scores = apply_sigmoid_to_scores self._class_prediction_bias_init = class_prediction_bias_init self._use_depthwise = use_depthwise def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A float tensors of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. 
""" net = features if self._use_dropout: net = slim.dropout(net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: class_predictions_with_background = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope='ClassPredictor_depthwise') class_predictions_with_background = slim.conv2d( class_predictions_with_background, num_predictions_per_location * self._num_class_slots, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='ClassPredictor') else: class_predictions_with_background = slim.conv2d( net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='ClassPredictor', biases_initializer=tf.constant_initializer( self._class_prediction_bias_init)) if self._apply_sigmoid_to_scores: class_predictions_with_background = tf.sigmoid( class_predictions_with_background) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional class head. class WeightSharedConvolutionalClassHead(head.Head): """Weight shared convolutional class prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, num_class_slots, kernel_size=3, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, use_depthwise=False, score_converter_fn=tf.identity): """Constructor. Args: num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. kernel_size: Size of final convolution kernel. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. score_converter_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). """ super(WeightSharedConvolutionalClassHead, self).__init__() self._num_class_slots = num_class_slots self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._use_depthwise = use_depthwise self._score_converter_fn = score_converter_fn def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. 
""" class_predictions_net = features if self._use_dropout: class_predictions_net = slim.dropout( class_predictions_net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d class_predictions_with_background = conv_op( class_predictions_net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, biases_initializer=tf.constant_initializer( self._class_prediction_bias_init), scope='ClassPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = self._score_converter_fn( class_predictions_with_background) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads/class_head.py
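Illustrative only: the weight-shared class head reuses a single set of 'ClassPredictor' variables across feature maps when its predict() calls run inside one variable scope with reuse enabled, which is how the surrounding box-predictor code (not shown in this section) is assumed to drive it.

import tensorflow as tf

from object_detection.predictors.heads import class_head

shared_head = class_head.WeightSharedConvolutionalClassHead(num_class_slots=20)
feature_maps = [
    tf.random_uniform([8, 19, 19, 64]),
    tf.random_uniform([8, 10, 10, 64]),
]
predictions = []
with tf.variable_scope('WeightSharedHead', reuse=tf.AUTO_REUSE):
  for image_feature in feature_maps:
    predictions.append(
        shared_head.predict(image_feature, num_predictions_per_location=1))
# predictions[i]: [8, num_anchors_i, 20]; both calls share the same
# convolution kernel and bias.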
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A set of functions that are used for visualization. These functions often receive an image and perform some visualization on it. The functions do not return a value; instead, they modify the image itself. """ import abc import collections import functools # Set headless-friendly backend. import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top import numpy as np import PIL.Image as Image import PIL.ImageColor as ImageColor import PIL.ImageDraw as ImageDraw import PIL.ImageFont as ImageFont import six import tensorflow as tf from object_detection.core import standard_fields as fields from object_detection.utils import shape_utils _TITLE_LEFT_MARGIN = 10 _TITLE_TOP_MARGIN = 10 STANDARD_COLORS = [ 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque', 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite', 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan', 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange', 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet', 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite', 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod', 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki', 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue', 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey', 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue', 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime', 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid', 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen', 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin', 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed', 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed', 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple', 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown', 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue', 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow', 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White', 'WhiteSmoke', 'Yellow', 'YellowGreen' ] def save_image_array_as_png(image, output_path): """Saves an image (represented as a numpy array) to PNG. Args: image: a numpy array with shape [height, width, 3]. output_path: path to which image should be written.
""" image_pil = Image.fromarray(np.uint8(image)).convert('RGB') with tf.gfile.Open(output_path, 'w') as fid: image_pil.save(fid, 'PNG') def encode_image_array_as_png_str(image): """Encodes a numpy array into a PNG string. Args: image: a numpy array with shape [height, width, 3]. Returns: PNG encoded image string. """ image_pil = Image.fromarray(np.uint8(image)) output = six.BytesIO() image_pil.save(output, format='PNG') png_string = output.getvalue() output.close() return png_string def draw_bounding_box_on_image_array(image, ymin, xmin, ymax, xmax, color='red', thickness=4, display_str_list=(), use_normalized_coordinates=True): """Adds a bounding box to an image (numpy array). Bounding box coordinates can be specified in either absolute (pixel) or normalized coordinates by setting the use_normalized_coordinates argument. Args: image: a numpy array with shape [height, width, 3]. ymin: ymin of bounding box. xmin: xmin of bounding box. ymax: ymax of bounding box. xmax: xmax of bounding box. color: color to draw bounding box. Default is red. thickness: line thickness. Default value is 4. display_str_list: list of strings to display in box (each to be shown on its own line). use_normalized_coordinates: If True (default), treat coordinates ymin, xmin, ymax, xmax as relative to the image. Otherwise treat coordinates as absolute. """ image_pil = Image.fromarray(np.uint8(image)).convert('RGB') draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color, thickness, display_str_list, use_normalized_coordinates) np.copyto(image, np.array(image_pil)) def draw_bounding_box_on_image(image, ymin, xmin, ymax, xmax, color='red', thickness=4, display_str_list=(), use_normalized_coordinates=True): """Adds a bounding box to an image. Bounding box coordinates can be specified in either absolute (pixel) or normalized coordinates by setting the use_normalized_coordinates argument. Each string in display_str_list is displayed on a separate line above the bounding box in black text on a rectangle filled with the input 'color'. If the top of the bounding box extends to the edge of the image, the strings are displayed below the bounding box. Args: image: a PIL.Image object. ymin: ymin of bounding box. xmin: xmin of bounding box. ymax: ymax of bounding box. xmax: xmax of bounding box. color: color to draw bounding box. Default is red. thickness: line thickness. Default value is 4. display_str_list: list of strings to display in box (each to be shown on its own line). use_normalized_coordinates: If True (default), treat coordinates ymin, xmin, ymax, xmax as relative to the image. Otherwise treat coordinates as absolute. """ draw = ImageDraw.Draw(image) im_width, im_height = image.size if use_normalized_coordinates: (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height) else: (left, right, top, bottom) = (xmin, xmax, ymin, ymax) draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color) try: font = ImageFont.truetype('arial.ttf', 24) except IOError: font = ImageFont.load_default() # If the total height of the display strings added to the top of the bounding # box exceeds the top of the image, stack the strings below the bounding box # instead of above. display_str_heights = [font.getsize(ds)[1] for ds in display_str_list] # Each display_str has a top and bottom margin of 0.05x. 
  total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)

  if top > total_display_str_height:
    text_bottom = top
  else:
    text_bottom = bottom + total_display_str_height
  # Reverse list and print from bottom to top.
  for display_str in display_str_list[::-1]:
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    draw.rectangle(
        [(left, text_bottom - text_height - 2 * margin), (left + text_width,
                                                          text_bottom)],
        fill=color)
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        display_str,
        fill='black',
        font=font)
    # Move up by the full height of the label band (text plus both margins)
    # so stacked strings do not overlap.
    text_bottom -= text_height + 2 * margin


def draw_bounding_boxes_on_image_array(image,
                                       boxes,
                                       color='red',
                                       thickness=4,
                                       display_str_list_list=()):
  """Draws bounding boxes on image (numpy array).

  Args:
    image: a numpy array object.
    boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
      The coordinates are in normalized format between [0, 1].
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list_list: a list of lists of strings, one list per bounding
      box. A box takes a list of strings because it may carry multiple labels.

  Raises:
    ValueError: if boxes is not a [N, 4] array
  """
  image_pil = Image.fromarray(image)
  draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
                               display_str_list_list)
  np.copyto(image, np.array(image_pil))


def draw_bounding_boxes_on_image(image,
                                 boxes,
                                 color='red',
                                 thickness=4,
                                 display_str_list_list=()):
  """Draws bounding boxes on image.

  Args:
    image: a PIL.Image object.
    boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
      The coordinates are in normalized format between [0, 1].
    color: color to draw bounding box. Default is red.
    thickness: line thickness. Default value is 4.
    display_str_list_list: a list of lists of strings, one list per bounding
      box. A box takes a list of strings because it may carry multiple labels.
Raises: ValueError: if boxes is not a [N, 4] array """ boxes_shape = boxes.shape if not boxes_shape: return if len(boxes_shape) != 2 or boxes_shape[1] != 4: raise ValueError('Input must be of size [N, 4]') for i in range(boxes_shape[0]): display_str_list = () if display_str_list_list: display_str_list = display_str_list_list[i] draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2], boxes[i, 3], color, thickness, display_str_list) def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs): return visualize_boxes_and_labels_on_image_array( image, boxes, classes, scores, category_index=category_index, **kwargs) def _visualize_boxes_and_masks(image, boxes, classes, scores, masks, category_index, **kwargs): return visualize_boxes_and_labels_on_image_array( image, boxes, classes, scores, category_index=category_index, instance_masks=masks, **kwargs) def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints, category_index, **kwargs): return visualize_boxes_and_labels_on_image_array( image, boxes, classes, scores, category_index=category_index, keypoints=keypoints, **kwargs) def _visualize_boxes_and_masks_and_keypoints( image, boxes, classes, scores, masks, keypoints, category_index, **kwargs): return visualize_boxes_and_labels_on_image_array( image, boxes, classes, scores, category_index=category_index, instance_masks=masks, keypoints=keypoints, **kwargs) def _resize_original_image(image, image_shape): image = tf.expand_dims(image, 0) image = tf.image.resize_images( image, image_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=True) return tf.cast(tf.squeeze(image, 0), tf.uint8) def draw_bounding_boxes_on_image_tensors(images, boxes, classes, scores, category_index, original_image_spatial_shape=None, true_image_shape=None, instance_masks=None, keypoints=None, max_boxes_to_draw=20, min_score_thresh=0.2, use_normalized_coordinates=True): """Draws bounding boxes, masks, and keypoints on batch of image tensors. Args: images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional channels will be ignored. If C = 1, then we convert the images to RGB images. boxes: [N, max_detections, 4] float32 tensor of detection boxes. classes: [N, max_detections] int tensor of detection classes. Note that classes are 1-indexed. scores: [N, max_detections] float32 tensor of detection scores. category_index: a dict that maps integer ids to category dicts. e.g. {1: {1: 'dog'}, 2: {2: 'cat'}, ...} original_image_spatial_shape: [N, 2] tensor containing the spatial size of the original image. true_image_shape: [N, 3] tensor containing the spatial size of unpadded original_image. instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with instance masks. keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2] with keypoints. max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20. min_score_thresh: Minimum score threshold for visualization. Default 0.2. use_normalized_coordinates: Whether to assume boxes and kepoints are in normalized coordinates (as opposed to absolute coordiantes). Default is True. Returns: 4D image tensor of type uint8, with boxes drawn on top. """ # Additional channels are being ignored. 
if images.shape[3] > 3: images = images[:, :, :, 0:3] elif images.shape[3] == 1: images = tf.image.grayscale_to_rgb(images) visualization_keyword_args = { 'use_normalized_coordinates': use_normalized_coordinates, 'max_boxes_to_draw': max_boxes_to_draw, 'min_score_thresh': min_score_thresh, 'agnostic_mode': False, 'line_thickness': 4 } if true_image_shape is None: true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3]) else: true_shapes = true_image_shape if original_image_spatial_shape is None: original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2]) else: original_shapes = original_image_spatial_shape if instance_masks is not None and keypoints is None: visualize_boxes_fn = functools.partial( _visualize_boxes_and_masks, category_index=category_index, **visualization_keyword_args) elems = [ true_shapes, original_shapes, images, boxes, classes, scores, instance_masks ] elif instance_masks is None and keypoints is not None: visualize_boxes_fn = functools.partial( _visualize_boxes_and_keypoints, category_index=category_index, **visualization_keyword_args) elems = [ true_shapes, original_shapes, images, boxes, classes, scores, keypoints ] elif instance_masks is not None and keypoints is not None: visualize_boxes_fn = functools.partial( _visualize_boxes_and_masks_and_keypoints, category_index=category_index, **visualization_keyword_args) elems = [ true_shapes, original_shapes, images, boxes, classes, scores, instance_masks, keypoints ] else: visualize_boxes_fn = functools.partial( _visualize_boxes, category_index=category_index, **visualization_keyword_args) elems = [ true_shapes, original_shapes, images, boxes, classes, scores ] def draw_boxes(image_and_detections): """Draws boxes on image.""" true_shape = image_and_detections[0] original_shape = image_and_detections[1] if true_image_shape is not None: image = shape_utils.pad_or_clip_nd(image_and_detections[2], [true_shape[0], true_shape[1], 3]) if original_image_spatial_shape is not None: image_and_detections[2] = _resize_original_image(image, original_shape) image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:], tf.uint8) return image_with_boxes images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False) return images def draw_side_by_side_evaluation_image(eval_dict, category_index, max_boxes_to_draw=20, min_score_thresh=0.2, use_normalized_coordinates=True): """Creates a side-by-side image with detections and groundtruth. Bounding boxes (and instance masks, if available) are visualized on both subimages. Args: eval_dict: The evaluation dictionary returned by eval_util.result_dict_for_batched_example() or eval_util.result_dict_for_single_example(). category_index: A category index (dictionary) produced from a labelmap. max_boxes_to_draw: The maximum number of boxes to draw for detections. min_score_thresh: The minimum score threshold for showing detections. use_normalized_coordinates: Whether to assume boxes and kepoints are in normalized coordinates (as opposed to absolute coordiantes). Default is True. Returns: A list of [1, H, 2 * W, C] uint8 tensor. The subimage on the left corresponds to detections, while the subimage on the right corresponds to groundtruth. """ detection_fields = fields.DetectionResultFields() input_data_fields = fields.InputDataFields() images_with_detections_list = [] # Add the batch dimension if the eval_dict is for single example. 
if len(eval_dict[detection_fields.detection_classes].shape) == 1: for key in eval_dict: if key != input_data_fields.original_image: eval_dict[key] = tf.expand_dims(eval_dict[key], 0) for indx in range(eval_dict[input_data_fields.original_image].shape[0]): instance_masks = None if detection_fields.detection_masks in eval_dict: instance_masks = tf.cast( tf.expand_dims( eval_dict[detection_fields.detection_masks][indx], axis=0), tf.uint8) keypoints = None if detection_fields.detection_keypoints in eval_dict: keypoints = tf.expand_dims( eval_dict[detection_fields.detection_keypoints][indx], axis=0) groundtruth_instance_masks = None if input_data_fields.groundtruth_instance_masks in eval_dict: groundtruth_instance_masks = tf.cast( tf.expand_dims( eval_dict[input_data_fields.groundtruth_instance_masks][indx], axis=0), tf.uint8) images_with_detections = draw_bounding_boxes_on_image_tensors( tf.expand_dims( eval_dict[input_data_fields.original_image][indx], axis=0), tf.expand_dims( eval_dict[detection_fields.detection_boxes][indx], axis=0), tf.expand_dims( eval_dict[detection_fields.detection_classes][indx], axis=0), tf.expand_dims( eval_dict[detection_fields.detection_scores][indx], axis=0), category_index, original_image_spatial_shape=tf.expand_dims( eval_dict[input_data_fields.original_image_spatial_shape][indx], axis=0), true_image_shape=tf.expand_dims( eval_dict[input_data_fields.true_image_shape][indx], axis=0), instance_masks=instance_masks, keypoints=keypoints, max_boxes_to_draw=max_boxes_to_draw, min_score_thresh=min_score_thresh, use_normalized_coordinates=use_normalized_coordinates) images_with_groundtruth = draw_bounding_boxes_on_image_tensors( tf.expand_dims( eval_dict[input_data_fields.original_image][indx], axis=0), tf.expand_dims( eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0), tf.expand_dims( eval_dict[input_data_fields.groundtruth_classes][indx], axis=0), tf.expand_dims( tf.ones_like( eval_dict[input_data_fields.groundtruth_classes][indx], dtype=tf.float32), axis=0), category_index, original_image_spatial_shape=tf.expand_dims( eval_dict[input_data_fields.original_image_spatial_shape][indx], axis=0), true_image_shape=tf.expand_dims( eval_dict[input_data_fields.true_image_shape][indx], axis=0), instance_masks=groundtruth_instance_masks, keypoints=None, max_boxes_to_draw=None, min_score_thresh=0.0, use_normalized_coordinates=use_normalized_coordinates) images_with_detections_list.append( tf.concat([images_with_detections, images_with_groundtruth], axis=2)) return images_with_detections_list def draw_keypoints_on_image_array(image, keypoints, color='red', radius=2, use_normalized_coordinates=True): """Draws keypoints on an image (numpy array). Args: image: a numpy array with shape [height, width, 3]. keypoints: a numpy array with shape [num_keypoints, 2]. color: color to draw the keypoints with. Default is red. radius: keypoint radius. Default value is 2. use_normalized_coordinates: if True (default), treat keypoint values as relative to the image. Otherwise treat them as absolute. """ image_pil = Image.fromarray(np.uint8(image)).convert('RGB') draw_keypoints_on_image(image_pil, keypoints, color, radius, use_normalized_coordinates) np.copyto(image, np.array(image_pil)) def draw_keypoints_on_image(image, keypoints, color='red', radius=2, use_normalized_coordinates=True): """Draws keypoints on an image. Args: image: a PIL.Image object. keypoints: a numpy array with shape [num_keypoints, 2]. color: color to draw the keypoints with. Default is red. radius: keypoint radius. 
Default value is 2. use_normalized_coordinates: if True (default), treat keypoint values as relative to the image. Otherwise treat them as absolute. """ draw = ImageDraw.Draw(image) im_width, im_height = image.size keypoints_x = [k[1] for k in keypoints] keypoints_y = [k[0] for k in keypoints] if use_normalized_coordinates: keypoints_x = tuple([im_width * x for x in keypoints_x]) keypoints_y = tuple([im_height * y for y in keypoints_y]) for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y): draw.ellipse([(keypoint_x - radius, keypoint_y - radius), (keypoint_x + radius, keypoint_y + radius)], outline=color, fill=color) def draw_mask_on_image_array(image, mask, color='red', alpha=0.4): """Draws mask on an image. Args: image: uint8 numpy array with shape (img_height, img_height, 3) mask: a uint8 numpy array of shape (img_height, img_height) with values between either 0 or 1. color: color to draw the keypoints with. Default is red. alpha: transparency value between 0 and 1. (default: 0.4) Raises: ValueError: On incorrect data type for image or masks. """ if image.dtype != np.uint8: raise ValueError('`image` not of type np.uint8') if mask.dtype != np.uint8: raise ValueError('`mask` not of type np.uint8') if np.any(np.logical_and(mask != 1, mask != 0)): raise ValueError('`mask` elements should be in [0, 1]') if image.shape[:2] != mask.shape: raise ValueError('The image has spatial dimensions %s but the mask has ' 'dimensions %s' % (image.shape[:2], mask.shape)) rgb = ImageColor.getrgb(color) pil_image = Image.fromarray(image) solid_color = np.expand_dims( np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3]) pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA') pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L') pil_image = Image.composite(pil_solid_color, pil_image, pil_mask) np.copyto(image, np.array(pil_image.convert('RGB'))) def visualize_boxes_and_labels_on_image_array( image, boxes, classes, scores, category_index, instance_masks=None, instance_boundaries=None, keypoints=None, use_normalized_coordinates=False, max_boxes_to_draw=20, min_score_thresh=.5, agnostic_mode=False, line_thickness=4, groundtruth_box_visualization_color='black', skip_scores=False, skip_labels=False): """Overlay labeled boxes on an image with formatted scores and label names. This function groups boxes that correspond to the same location and creates a display string for each detection and overlays these on the image. Note that this function modifies the image in place, and returns that same image. Args: image: uint8 numpy array with shape (img_height, img_width, 3) boxes: a numpy array of shape [N, 4] classes: a numpy array of shape [N]. Note that class indices are 1-based, and match the keys in the label map. scores: a numpy array of shape [N] or None. If scores=None, then this function assumes that the boxes to be plotted are groundtruth boxes and plot all boxes as black with no classes or scores. category_index: a dict containing category dictionaries (each holding category index `id` and category name `name`) keyed by category indices. instance_masks: a numpy array of shape [N, image_height, image_width] with values ranging between 0 and 1, can be None. instance_boundaries: a numpy array of shape [N, image_height, image_width] with values ranging between 0 and 1, can be None. keypoints: a numpy array of shape [N, num_keypoints, 2], can be None use_normalized_coordinates: whether boxes is to be interpreted as normalized coordinates or not. 
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all boxes. min_score_thresh: minimum score threshold for a box to be visualized agnostic_mode: boolean (default: False) controlling whether to evaluate in class-agnostic mode or not. This mode will display scores but ignore classes. line_thickness: integer (default: 4) controlling line width of the boxes. groundtruth_box_visualization_color: box color for visualizing groundtruth boxes skip_scores: whether to skip score when drawing a single detection skip_labels: whether to skip label when drawing a single detection Returns: uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes. """ # Create a display string (and color) for every box location, group any boxes # that correspond to the same location. box_to_display_str_map = collections.defaultdict(list) box_to_color_map = collections.defaultdict(str) box_to_instance_masks_map = {} box_to_instance_boundaries_map = {} box_to_keypoints_map = collections.defaultdict(list) if not max_boxes_to_draw: max_boxes_to_draw = boxes.shape[0] for i in range(min(max_boxes_to_draw, boxes.shape[0])): if scores is None or scores[i] > min_score_thresh: box = tuple(boxes[i].tolist()) if instance_masks is not None: box_to_instance_masks_map[box] = instance_masks[i] if instance_boundaries is not None: box_to_instance_boundaries_map[box] = instance_boundaries[i] if keypoints is not None: box_to_keypoints_map[box].extend(keypoints[i]) if scores is None: box_to_color_map[box] = groundtruth_box_visualization_color else: display_str = '' if not skip_labels: if not agnostic_mode: if classes[i] in category_index.keys(): class_name = category_index[classes[i]]['name'] else: class_name = 'N/A' display_str = str(class_name) if not skip_scores: if not display_str: display_str = '{}%'.format(int(100*scores[i])) else: display_str = '{}: {}%'.format(display_str, int(100*scores[i])) box_to_display_str_map[box].append(display_str) if agnostic_mode: box_to_color_map[box] = 'DarkOrange' else: box_to_color_map[box] = STANDARD_COLORS[ classes[i] % len(STANDARD_COLORS)] # Draw all boxes onto image. for box, color in box_to_color_map.items(): ymin, xmin, ymax, xmax = box if instance_masks is not None: draw_mask_on_image_array( image, box_to_instance_masks_map[box], color=color ) if instance_boundaries is not None: draw_mask_on_image_array( image, box_to_instance_boundaries_map[box], color='red', alpha=1.0 ) draw_bounding_box_on_image_array( image, ymin, xmin, ymax, xmax, color=color, thickness=line_thickness, display_str_list=box_to_display_str_map[box], use_normalized_coordinates=use_normalized_coordinates) if keypoints is not None: draw_keypoints_on_image_array( image, box_to_keypoints_map[box], color=color, radius=line_thickness / 2, use_normalized_coordinates=use_normalized_coordinates) return image def add_cdf_image_summary(values, name): """Adds a tf.summary.image for a CDF plot of the values. Normalizes `values` such that they sum to 1, plots the cumulative distribution function and creates a tf image summary. Args: values: a 1-D float32 tensor containing the values. name: name for the image summary. 
""" def cdf_plot(values): """Numpy function to plot CDF.""" normalized_values = values / np.sum(values) sorted_values = np.sort(normalized_values) cumulative_values = np.cumsum(sorted_values) fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32) / cumulative_values.size) fig = plt.figure(frameon=False) ax = fig.add_subplot('111') ax.plot(fraction_of_examples, cumulative_values) ax.set_ylabel('cumulative normalized values') ax.set_xlabel('fraction of examples') fig.canvas.draw() width, height = fig.get_size_inches() * fig.get_dpi() image = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape( 1, int(height), int(width), 3) return image cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8) tf.summary.image(name, cdf_plot) def add_hist_image_summary(values, bins, name): """Adds a tf.summary.image for a histogram plot of the values. Plots the histogram of values and creates a tf image summary. Args: values: a 1-D float32 tensor containing the values. bins: bin edges which will be directly passed to np.histogram. name: name for the image summary. """ def hist_plot(values, bins): """Numpy function to plot hist.""" fig = plt.figure(frameon=False) ax = fig.add_subplot('111') y, x = np.histogram(values, bins=bins) ax.plot(x[:-1], y) ax.set_ylabel('count') ax.set_xlabel('value') fig.canvas.draw() width, height = fig.get_size_inches() * fig.get_dpi() image = np.fromstring( fig.canvas.tostring_rgb(), dtype='uint8').reshape( 1, int(height), int(width), 3) return image hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8) tf.summary.image(name, hist_plot) class EvalMetricOpsVisualization(object): """Abstract base class responsible for visualizations during evaluation. Currently, summary images are not run during evaluation. One way to produce evaluation images in Tensorboard is to provide tf.summary.image strings as `value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is responsible for accruing images (with overlaid detections and groundtruth) and returning a dictionary that can be passed to `eval_metric_ops`. """ __metaclass__ = abc.ABCMeta def __init__(self, category_index, max_examples_to_draw=5, max_boxes_to_draw=20, min_score_thresh=0.2, use_normalized_coordinates=True, summary_name_prefix='evaluation_image'): """Creates an EvalMetricOpsVisualization. Args: category_index: A category index (dictionary) produced from a labelmap. max_examples_to_draw: The maximum number of example summaries to produce. max_boxes_to_draw: The maximum number of boxes to draw for detections. min_score_thresh: The minimum score threshold for showing detections. use_normalized_coordinates: Whether to assume boxes and kepoints are in normalized coordinates (as opposed to absolute coordiantes). Default is True. summary_name_prefix: A string prefix for each image summary. """ self._category_index = category_index self._max_examples_to_draw = max_examples_to_draw self._max_boxes_to_draw = max_boxes_to_draw self._min_score_thresh = min_score_thresh self._use_normalized_coordinates = use_normalized_coordinates self._summary_name_prefix = summary_name_prefix self._images = [] def clear(self): self._images = [] def add_images(self, images): """Store a list of images, each with shape [1, H, W, C].""" if len(self._images) >= self._max_examples_to_draw: return # Store images and clip list if necessary. 
self._images.extend(images) if len(self._images) > self._max_examples_to_draw: self._images[self._max_examples_to_draw:] = [] def get_estimator_eval_metric_ops(self, eval_dict): """Returns metric ops for use in tf.estimator.EstimatorSpec. Args: eval_dict: A dictionary that holds an image, groundtruth, and detections for a batched example. Note that, we use only the first example for visualization. See eval_util.result_dict_for_batched_example() for a convenient method for constructing such a dictionary. The dictionary contains fields.InputDataFields.original_image: [batch_size, H, W, 3] image. fields.InputDataFields.original_image_spatial_shape: [batch_size, 2] tensor containing the size of the original image. fields.InputDataFields.true_image_shape: [batch_size, 3] tensor containing the spatial size of the upadded original image. fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4] float32 tensor with groundtruth boxes in range [0.0, 1.0]. fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes] int64 tensor with 1-indexed groundtruth classes. fields.InputDataFields.groundtruth_instance_masks - (optional) [batch_size, num_boxes, H, W] int64 tensor with instance masks. fields.DetectionResultFields.detection_boxes - [batch_size, max_num_boxes, 4] float32 tensor with detection boxes in range [0.0, 1.0]. fields.DetectionResultFields.detection_classes - [batch_size, max_num_boxes] int64 tensor with 1-indexed detection classes. fields.DetectionResultFields.detection_scores - [batch_size, max_num_boxes] float32 tensor with detection scores. fields.DetectionResultFields.detection_masks - (optional) [batch_size, max_num_boxes, H, W] float32 tensor of binarized masks. fields.DetectionResultFields.detection_keypoints - (optional) [batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with keypoints. Returns: A dictionary of image summary names to tuple of (value_op, update_op). The `update_op` is the same for all items in the dictionary, and is responsible for saving a single side-by-side image with detections and groundtruth. Each `value_op` holds the tf.summary.image string for a given image. """ if self._max_examples_to_draw == 0: return {} images = self.images_from_evaluation_dict(eval_dict) def get_images(): """Returns a list of images, padded to self._max_images_to_draw.""" images = self._images while len(images) < self._max_examples_to_draw: images.append(np.array(0, dtype=np.uint8)) self.clear() return images def image_summary_or_default_string(summary_name, image): """Returns image summaries for non-padded elements.""" return tf.cond( tf.equal(tf.size(tf.shape(image)), 4), lambda: tf.summary.image(summary_name, image), lambda: tf.constant('')) update_op = tf.py_func(self.add_images, [[images[0]]], []) image_tensors = tf.py_func( get_images, [], [tf.uint8] * self._max_examples_to_draw) eval_metric_ops = {} for i, image in enumerate(image_tensors): summary_name = self._summary_name_prefix + '/' + str(i) value_op = image_summary_or_default_string(summary_name, image) eval_metric_ops[summary_name] = (value_op, update_op) return eval_metric_ops @abc.abstractmethod def images_from_evaluation_dict(self, eval_dict): """Converts evaluation dictionary into a list of image tensors. To be overridden by implementations. Args: eval_dict: A dictionary with all the necessary information for producing visualizations. Returns: A list of [1, H, W, C] uint8 tensors. 
""" raise NotImplementedError class VisualizeSingleFrameDetections(EvalMetricOpsVisualization): """Class responsible for single-frame object detection visualizations.""" def __init__(self, category_index, max_examples_to_draw=5, max_boxes_to_draw=20, min_score_thresh=0.2, use_normalized_coordinates=True, summary_name_prefix='Detections_Left_Groundtruth_Right'): super(VisualizeSingleFrameDetections, self).__init__( category_index=category_index, max_examples_to_draw=max_examples_to_draw, max_boxes_to_draw=max_boxes_to_draw, min_score_thresh=min_score_thresh, use_normalized_coordinates=use_normalized_coordinates, summary_name_prefix=summary_name_prefix) def images_from_evaluation_dict(self, eval_dict): return draw_side_by_side_evaluation_image( eval_dict, self._category_index, self._max_boxes_to_draw, self._min_score_thresh, self._use_normalized_coordinates)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/visualization_utils.py
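For orientation, a minimal usage sketch for the visualization helpers above (not part of the file): it assumes a hypothetical two-class category index and hand-made detections, and draws them onto a blank RGB canvas in place.

import numpy as np

from object_detection.utils import visualization_utils as vis_util

image = np.zeros((480, 640, 3), dtype=np.uint8)  # blank RGB canvas
boxes = np.array([[0.1, 0.1, 0.5, 0.4],    # each row: [ymin, xmin, ymax, xmax]
                  [0.3, 0.5, 0.9, 0.95]], dtype=np.float32)
classes = np.array([1, 2], dtype=np.int32)  # class ids are 1-indexed
scores = np.array([0.9, 0.75], dtype=np.float32)
# Hypothetical label map; real ones are built with label_map_util.
category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}

# Modifies `image` in place and returns the same array.
vis_util.visualize_boxes_and_labels_on_image_array(
    image, boxes, classes, scores, category_index,
    use_normalized_coordinates=True, min_score_thresh=0.5)
vis_util.save_image_array_as_png(image, '/tmp/detections.png')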
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.label_map_util.""" import os import tensorflow as tf from google.protobuf import text_format from object_detection.protos import string_int_label_map_pb2 from object_detection.utils import label_map_util class LabelMapUtilTest(tf.test.TestCase): def _generate_label_map(self, num_classes): label_map_proto = string_int_label_map_pb2.StringIntLabelMap() for i in range(1, num_classes + 1): item = label_map_proto.item.add() item.id = i item.name = 'label_' + str(i) item.display_name = str(i) return label_map_proto def test_get_label_map_dict(self): label_map_string = """ item { id:2 name:'cat' } item { id:1 name:'dog' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) label_map_dict = label_map_util.get_label_map_dict(label_map_path) self.assertEqual(label_map_dict['dog'], 1) self.assertEqual(label_map_dict['cat'], 2) def test_get_label_map_dict_display(self): label_map_string = """ item { id:2 display_name:'cat' } item { id:1 display_name:'dog' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) label_map_dict = label_map_util.get_label_map_dict( label_map_path, use_display_name=True) self.assertEqual(label_map_dict['dog'], 1) self.assertEqual(label_map_dict['cat'], 2) def test_load_bad_label_map(self): label_map_string = """ item { id:0 name:'class that should not be indexed at zero' } item { id:2 name:'cat' } item { id:1 name:'dog' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) with self.assertRaises(ValueError): label_map_util.load_labelmap(label_map_path) def test_load_label_map_with_background(self): label_map_string = """ item { id:0 name:'background' } item { id:2 name:'cat' } item { id:1 name:'dog' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) label_map_dict = label_map_util.get_label_map_dict(label_map_path) self.assertEqual(label_map_dict['background'], 0) self.assertEqual(label_map_dict['dog'], 1) self.assertEqual(label_map_dict['cat'], 2) def test_get_label_map_dict_with_fill_in_gaps_and_background(self): label_map_string = """ item { id:3 name:'cat' } item { id:1 name:'dog' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) label_map_dict = label_map_util.get_label_map_dict( label_map_path, fill_in_gaps_and_background=True) self.assertEqual(label_map_dict['background'], 0) self.assertEqual(label_map_dict['dog'], 1) self.assertEqual(label_map_dict['class_2'], 2) 
self.assertEqual(label_map_dict['cat'], 3) self.assertEqual(len(label_map_dict), max(label_map_dict.values()) + 1) def test_keep_categories_with_unique_id(self): label_map_proto = string_int_label_map_pb2.StringIntLabelMap() label_map_string = """ item { id:2 name:'cat' } item { id:1 name:'child' } item { id:1 name:'person' } item { id:1 name:'n00007846' } """ text_format.Merge(label_map_string, label_map_proto) categories = label_map_util.convert_label_map_to_categories( label_map_proto, max_num_classes=3) self.assertListEqual([{ 'id': 2, 'name': u'cat' }, { 'id': 1, 'name': u'child' }], categories) def test_convert_label_map_to_categories_no_label_map(self): categories = label_map_util.convert_label_map_to_categories( None, max_num_classes=3) expected_categories_list = [{ 'name': u'category_1', 'id': 1 }, { 'name': u'category_2', 'id': 2 }, { 'name': u'category_3', 'id': 3 }] self.assertListEqual(expected_categories_list, categories) def test_convert_label_map_to_categories(self): label_map_proto = self._generate_label_map(num_classes=4) categories = label_map_util.convert_label_map_to_categories( label_map_proto, max_num_classes=3) expected_categories_list = [{ 'name': u'1', 'id': 1 }, { 'name': u'2', 'id': 2 }, { 'name': u'3', 'id': 3 }] self.assertListEqual(expected_categories_list, categories) def test_convert_label_map_to_categories_with_few_classes(self): label_map_proto = self._generate_label_map(num_classes=4) cat_no_offset = label_map_util.convert_label_map_to_categories( label_map_proto, max_num_classes=2) expected_categories_list = [{ 'name': u'1', 'id': 1 }, { 'name': u'2', 'id': 2 }] self.assertListEqual(expected_categories_list, cat_no_offset) def test_get_max_label_map_index(self): num_classes = 4 label_map_proto = self._generate_label_map(num_classes=num_classes) max_index = label_map_util.get_max_label_map_index(label_map_proto) self.assertEqual(num_classes, max_index) def test_create_category_index(self): categories = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}] category_index = label_map_util.create_category_index(categories) self.assertDictEqual({ 1: { 'name': u'1', 'id': 1 }, 2: { 'name': u'2', 'id': 2 } }, category_index) def test_create_categories_from_labelmap(self): label_map_string = """ item { id:1 name:'dog' } item { id:2 name:'cat' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) categories = label_map_util.create_categories_from_labelmap(label_map_path) self.assertListEqual([{ 'name': u'dog', 'id': 1 }, { 'name': u'cat', 'id': 2 }], categories) def test_create_category_index_from_labelmap(self): label_map_string = """ item { id:2 name:'cat' } item { id:1 name:'dog' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) category_index = label_map_util.create_category_index_from_labelmap( label_map_path) self.assertDictEqual({ 1: { 'name': u'dog', 'id': 1 }, 2: { 'name': u'cat', 'id': 2 } }, category_index) def test_create_category_index_from_labelmap_display(self): label_map_string = """ item { id:2 name:'cat' display_name:'meow' } item { id:1 name:'dog' display_name:'woof' } """ label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt') with tf.gfile.Open(label_map_path, 'wb') as f: f.write(label_map_string) self.assertDictEqual({ 1: { 'name': u'dog', 'id': 1 }, 2: { 'name': u'cat', 'id': 2 } }, label_map_util.create_category_index_from_labelmap( 
label_map_path, False)) self.assertDictEqual({ 1: { 'name': u'woof', 'id': 1 }, 2: { 'name': u'meow', 'id': 2 } }, label_map_util.create_category_index_from_labelmap(label_map_path)) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/label_map_util_test.py
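A small sketch of the label_map_util calls exercised by the tests above, building a category index directly from a label map proto (the two-item label map is illustrative):

from google.protobuf import text_format

from object_detection.protos import string_int_label_map_pb2
from object_detection.utils import label_map_util

label_map = string_int_label_map_pb2.StringIntLabelMap()
text_format.Merge("item { id: 1 name: 'dog' } item { id: 2 name: 'cat' }",
                  label_map)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=2)
category_index = label_map_util.create_category_index(categories)
# category_index == {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}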
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing metrics like precision, recall, CorLoc, etc."""
from __future__ import division

import numpy as np


def compute_precision_recall(scores, labels, num_gt):
  """Compute precision and recall.

  Args:
    scores: A float numpy array representing detection scores
    labels: A float numpy array representing weighted true/false positive
      labels
    num_gt: Number of ground truth instances

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    precision: Fraction of positive instances over detected ones. This value is
      None if no ground truth labels are present.
    recall: Fraction of detected positive instances over all positive
      instances. This value is None if no ground truth labels are present.
  """
  if not isinstance(labels, np.ndarray) or len(labels.shape) != 1:
    raise ValueError("labels must be single dimension numpy array")

  if labels.dtype != np.float and labels.dtype != np.bool:
    raise ValueError("labels type must be either bool or float")

  if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
    raise ValueError("scores must be single dimension numpy array")

  if num_gt < np.sum(labels):
    raise ValueError("Number of true positives must not exceed num_gt.")

  if len(scores) != len(labels):
    raise ValueError("scores and labels must be of the same size.")

  if num_gt == 0:
    return None, None

  sorted_indices = np.argsort(scores)
  sorted_indices = sorted_indices[::-1]
  true_positive_labels = labels[sorted_indices]
  false_positive_labels = (true_positive_labels <= 0).astype(float)
  cum_true_positives = np.cumsum(true_positive_labels)
  cum_false_positives = np.cumsum(false_positive_labels)
  precision = cum_true_positives.astype(float) / (
      cum_true_positives + cum_false_positives)
  recall = cum_true_positives.astype(float) / num_gt
  return precision, recall


def compute_average_precision(precision, recall):
  """Compute Average Precision according to the definition in VOCdevkit.

  Precision is modified to ensure that it does not decrease as recall
  decreases.

  Args:
    precision: A float [N, 1] numpy array of precisions
    recall: A float [N, 1] numpy array of recalls

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    average_precision: The area under the precision recall curve. NaN if
      precision and recall are None.
""" if precision is None: if recall is not None: raise ValueError("If precision is None, recall must also be None") return np.NAN if not isinstance(precision, np.ndarray) or not isinstance( recall, np.ndarray): raise ValueError("precision and recall must be numpy array") if precision.dtype != np.float or recall.dtype != np.float: raise ValueError("input must be float numpy array.") if len(precision) != len(recall): raise ValueError("precision and recall must be of the same size.") if not precision.size: return 0.0 if np.amin(precision) < 0 or np.amax(precision) > 1: raise ValueError("Precision must be in the range of [0, 1].") if np.amin(recall) < 0 or np.amax(recall) > 1: raise ValueError("recall must be in the range of [0, 1].") if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)): raise ValueError("recall must be a non-decreasing array") recall = np.concatenate([[0], recall, [1]]) precision = np.concatenate([[0], precision, [0]]) # Preprocess precision to be a non-decreasing array for i in range(len(precision) - 2, -1, -1): precision[i] = np.maximum(precision[i], precision[i + 1]) indices = np.where(recall[1:] != recall[:-1])[0] + 1 average_precision = np.sum( (recall[indices] - recall[indices - 1]) * precision[indices]) return average_precision def compute_cor_loc(num_gt_imgs_per_class, num_images_correctly_detected_per_class): """Compute CorLoc according to the definition in the following paper. https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf Returns nans if there are no ground truth images for a class. Args: num_gt_imgs_per_class: 1D array, representing number of images containing at least one object instance of a particular class num_images_correctly_detected_per_class: 1D array, representing number of images that are correctly detected at least one object instance of a particular class Returns: corloc_per_class: A float numpy array represents the corloc score of each class """ return np.where( num_gt_imgs_per_class == 0, np.nan, num_images_correctly_detected_per_class / num_gt_imgs_per_class) def compute_median_rank_at_k(tp_fp_list, k): """Computes MedianRank@k, where k is the top-scoring labels. Args: tp_fp_list: a list of numpy arrays; each numpy array corresponds to the all detection on a single image, where the detections are sorted by score in descending order. Further, each numpy array element can have boolean or float values. True positive elements have either value >0.0 or True; any other value is considered false positive. k: number of top-scoring proposals to take. Returns: median_rank: median rank of all true positive proposals among top k by score. """ ranks = [] for i in range(len(tp_fp_list)): ranks.append( np.where(tp_fp_list[i][0:min(k, tp_fp_list[i].shape[0])] > 0)[0]) concatenated_ranks = np.concatenate(ranks) return np.median(concatenated_ranks) def compute_recall_at_k(tp_fp_list, num_gt, k): """Computes Recall@k, MedianRank@k, where k is the top-scoring labels. Args: tp_fp_list: a list of numpy arrays; each numpy array corresponds to the all detection on a single image, where the detections are sorted by score in descending order. Further, each numpy array element can have boolean or float values. True positive elements have either value >0.0 or True; any other value is considered false positive. num_gt: number of groundtruth anotations. k: number of top-scoring proposals to take. Returns: recall: recall evaluated on the top k by score detections. 
""" tp_fp_eval = [] for i in range(len(tp_fp_list)): tp_fp_eval.append(tp_fp_list[i][0:min(k, tp_fp_list[i].shape[0])]) tp_fp_eval = np.concatenate(tp_fp_eval) return np.sum(tp_fp_eval) / num_gt
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/metrics.py
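To make the precision/recall bookkeeping concrete, here is a small worked example (the numbers are illustrative): four detections, three of which are true positives, against num_gt = 4 groundtruth instances.

import numpy as np

from object_detection.utils import metrics

scores = np.array([0.9, 0.8, 0.6, 0.3], dtype=float)
labels = np.array([1, 0, 1, 1], dtype=float)  # 1 = true positive, 0 = false positive

precision, recall = metrics.compute_precision_recall(scores, labels, num_gt=4)
# precision == [1.0, 0.5, 0.6667, 0.75]; recall == [0.25, 0.25, 0.5, 0.75]

ap = metrics.compute_average_precision(precision, recall)
# After the non-decreasing interpolation, the precision at the recall steps of
# width 0.25 is [1.0, 0.75, 0.75, 0.0], so
# ap == 0.25 * 1.0 + 0.25 * 0.75 + 0.25 * 0.75 + 0.25 * 0.0 == 0.625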
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.np_mask_ops.""" import numpy as np import tensorflow as tf from object_detection.utils import np_mask_ops class MaskOpsTests(tf.test.TestCase): def setUp(self): masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0]], dtype=np.uint8) masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) masks1 = np.stack([masks1_0, masks1_1]) masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0]], dtype=np.uint8) masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0]], dtype=np.uint8) masks2 = np.stack([masks2_0, masks2_1, masks2_2]) self.masks1 = masks1 self.masks2 = masks2 def testArea(self): areas = np_mask_ops.area(self.masks1) expected_areas = np.array([8.0, 10.0], dtype=np.float32) self.assertAllClose(expected_areas, areas) def testIntersection(self): intersection = np_mask_ops.intersection(self.masks1, self.masks2) expected_intersection = np.array( [[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]], dtype=np.float32) self.assertAllClose(intersection, expected_intersection) def testIOU(self): iou = np_mask_ops.iou(self.masks1, self.masks2) expected_iou = np.array( [[1.0, 0.0, 8.0/25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=np.float32) self.assertAllClose(iou, expected_iou) def testIOA(self): ioa21 = np_mask_ops.ioa(self.masks1, self.masks2) expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0], [0.0, 9.0/15.0, 7.0/25.0]], dtype=np.float32) self.assertAllClose(ioa21, expected_ioa21) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/np_mask_ops_test.py
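A tiny sketch of the np_mask_ops API the test above exercises, using two hand-made binary masks:

import numpy as np

from object_detection.utils import np_mask_ops

masks_a = np.zeros((1, 4, 4), dtype=np.uint8)
masks_a[0, :2, :2] = 1  # 2x2 patch, area 4
masks_b = np.zeros((1, 4, 4), dtype=np.uint8)
masks_b[0, :2, :] = 1   # 2x4 strip, area 8

np_mask_ops.area(masks_a)                   # [4.]
np_mask_ops.intersection(masks_a, masks_b)  # [[4.]]
np_mask_ops.iou(masks_a, masks_b)           # [[0.5]]  (4 / (4 + 8 - 4))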
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains functions which are convenient for unit testing.""" import numpy as np import tensorflow as tf from object_detection.core import anchor_generator from object_detection.core import box_coder from object_detection.core import box_list from object_detection.core import box_predictor from object_detection.core import matcher from object_detection.utils import shape_utils # Default size (both width and height) used for testing mask predictions. DEFAULT_MASK_SIZE = 5 class MockBoxCoder(box_coder.BoxCoder): """Simple `difference` BoxCoder.""" @property def code_size(self): return 4 def _encode(self, boxes, anchors): return boxes.get() - anchors.get() def _decode(self, rel_codes, anchors): return box_list.BoxList(rel_codes + anchors.get()) class MockMaskHead(object): """Simple maskhead that returns all zeros as mask predictions.""" def __init__(self, num_classes): self._num_classes = num_classes def predict(self, features): batch_size = tf.shape(features)[0] return tf.zeros((batch_size, 1, self._num_classes, DEFAULT_MASK_SIZE, DEFAULT_MASK_SIZE), dtype=tf.float32) class MockBoxPredictor(box_predictor.BoxPredictor): """Simple box predictor that ignores inputs and outputs all zeros.""" def __init__(self, is_training, num_classes, add_background_class=True): super(MockBoxPredictor, self).__init__(is_training, num_classes) self._add_background_class = add_background_class def _predict(self, image_features, num_predictions_per_location): image_feature = image_features[0] combined_feature_shape = shape_utils.combined_static_and_dynamic_shape( image_feature) batch_size = combined_feature_shape[0] num_anchors = (combined_feature_shape[1] * combined_feature_shape[2]) code_size = 4 zero = tf.reduce_sum(0 * image_feature) num_class_slots = self.num_classes if self._add_background_class: num_class_slots = num_class_slots + 1 box_encodings = zero + tf.zeros( (batch_size, num_anchors, 1, code_size), dtype=tf.float32) class_predictions_with_background = zero + tf.zeros( (batch_size, num_anchors, num_class_slots), dtype=tf.float32) predictions_dict = { box_predictor.BOX_ENCODINGS: box_encodings, box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background } return predictions_dict class MockKerasBoxPredictor(box_predictor.KerasBoxPredictor): """Simple box predictor that ignores inputs and outputs all zeros.""" def __init__(self, is_training, num_classes, add_background_class=True): super(MockKerasBoxPredictor, self).__init__( is_training, num_classes, False, False) self._add_background_class = add_background_class def _predict(self, image_features, **kwargs): image_feature = image_features[0] combined_feature_shape = shape_utils.combined_static_and_dynamic_shape( image_feature) batch_size = combined_feature_shape[0] num_anchors = (combined_feature_shape[1] * combined_feature_shape[2]) code_size = 4 zero = 
tf.reduce_sum(0 * image_feature) num_class_slots = self.num_classes if self._add_background_class: num_class_slots = num_class_slots + 1 box_encodings = zero + tf.zeros( (batch_size, num_anchors, 1, code_size), dtype=tf.float32) class_predictions_with_background = zero + tf.zeros( (batch_size, num_anchors, num_class_slots), dtype=tf.float32) predictions_dict = { box_predictor.BOX_ENCODINGS: box_encodings, box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background } return predictions_dict class MockAnchorGenerator(anchor_generator.AnchorGenerator): """Mock anchor generator.""" def name_scope(self): return 'MockAnchorGenerator' def num_anchors_per_location(self): return [1] def _generate(self, feature_map_shape_list): num_anchors = sum([shape[0] * shape[1] for shape in feature_map_shape_list]) return box_list.BoxList(tf.zeros((num_anchors, 4), dtype=tf.float32)) class MockMatcher(matcher.Matcher): """Simple matcher that matches first anchor to first groundtruth box.""" def _match(self, similarity_matrix, valid_rows): return tf.constant([0, -1, -1, -1], dtype=tf.int32) def create_diagonal_gradient_image(height, width, depth): """Creates pyramid image. Useful for testing. For example, pyramid_image(5, 6, 1) looks like: # [[[ 5. 4. 3. 2. 1. 0.] # [ 6. 5. 4. 3. 2. 1.] # [ 7. 6. 5. 4. 3. 2.] # [ 8. 7. 6. 5. 4. 3.] # [ 9. 8. 7. 6. 5. 4.]]] Args: height: height of image width: width of image depth: depth of image Returns: pyramid image """ row = np.arange(height) col = np.arange(width)[::-1] image_layer = np.expand_dims(row, 1) + col image_layer = np.expand_dims(image_layer, 2) image = image_layer for i in range(1, depth): image = np.concatenate((image, image_layer * pow(10, i)), 2) return image.astype(np.float32) def create_random_boxes(num_boxes, max_height, max_width): """Creates random bounding boxes of specific maximum height and width. Args: num_boxes: number of boxes. max_height: maximum height of boxes. max_width: maximum width of boxes. Returns: boxes: numpy array of shape [num_boxes, 4]. Each row is in form [y_min, x_min, y_max, x_max]. """ y_1 = np.random.uniform(size=(1, num_boxes)) * max_height y_2 = np.random.uniform(size=(1, num_boxes)) * max_height x_1 = np.random.uniform(size=(1, num_boxes)) * max_width x_2 = np.random.uniform(size=(1, num_boxes)) * max_width boxes = np.zeros(shape=(num_boxes, 4)) boxes[:, 0] = np.minimum(y_1, y_2) boxes[:, 1] = np.minimum(x_1, x_2) boxes[:, 2] = np.maximum(y_1, y_2) boxes[:, 3] = np.maximum(x_1, x_2) return boxes.astype(np.float32) def first_rows_close_as_set(a, b, k=None, rtol=1e-6, atol=1e-6): """Checks if first K entries of two lists are close, up to permutation. Inputs to this assert are lists of items which can be compared via numpy.allclose(...) and can be sorted. Args: a: list of items which can be compared via numpy.allclose(...) and are sortable. b: list of items which can be compared via numpy.allclose(...) and are sortable. k: a non-negative integer. If not provided, k is set to be len(a). rtol: relative tolerance. atol: absolute tolerance. Returns: boolean, True if input lists a and b have the same length and the first k entries of the inputs satisfy numpy.allclose() after sorting entries. """ if not isinstance(a, list) or not isinstance(b, list) or len(a) != len(b): return False if not k: k = len(a) k = min(k, len(a)) a_sorted = sorted(a[:k]) b_sorted = sorted(b[:k]) return all([ np.allclose(entry_a, entry_b, rtol, atol) for (entry_a, entry_b) in zip(a_sorted, b_sorted) ])
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/test_utils.py
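For illustration, a short sketch of two of the helpers above; the values are arbitrary:

import numpy as np

from object_detection.utils import test_utils

boxes = test_utils.create_random_boxes(num_boxes=3, max_height=100,
                                       max_width=200)
# boxes has shape (3, 4); each row is [y_min, x_min, y_max, x_max] with
# y_min <= y_max and x_min <= x_max.

a = [[1.0, 2.0], [3.0, 4.0]]
b = [[3.0, 4.0], [1.0, 2.0]]  # same rows in a different order
assert test_utils.first_rows_close_as_set(a, b, k=2)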
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.np_box_list_test.""" import numpy as np import tensorflow as tf from object_detection.utils import np_box_list class BoxListTest(tf.test.TestCase): def test_invalid_box_data(self): with self.assertRaises(ValueError): np_box_list.BoxList([0, 0, 1, 1]) with self.assertRaises(ValueError): np_box_list.BoxList(np.array([[0, 0, 1, 1]], dtype=int)) with self.assertRaises(ValueError): np_box_list.BoxList(np.array([0, 1, 1, 3, 4], dtype=float)) with self.assertRaises(ValueError): np_box_list.BoxList(np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float)) def test_has_field_with_existed_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) self.assertTrue(boxlist.has_field('boxes')) def test_has_field_with_nonexisted_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) self.assertFalse(boxlist.has_field('scores')) def test_get_field_with_existed_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) self.assertTrue(np.allclose(boxlist.get_field('boxes'), boxes)) def test_get_field_with_nonexited_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) with self.assertRaises(ValueError): boxlist.get_field('scores') class AddExtraFieldTest(tf.test.TestCase): def setUp(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) self.boxlist = np_box_list.BoxList(boxes) def test_add_already_existed_field(self): with self.assertRaises(ValueError): self.boxlist.add_field('boxes', np.array([[0, 0, 0, 1, 0]], dtype=float)) def test_add_invalid_field_data(self): with self.assertRaises(ValueError): self.boxlist.add_field('scores', np.array([0.5, 0.7], dtype=float)) with self.assertRaises(ValueError): self.boxlist.add_field('scores', np.array([0.5, 0.7, 0.9, 0.1], dtype=float)) def test_add_single_dimensional_field_data(self): boxlist = self.boxlist scores = np.array([0.5, 0.7, 0.9], dtype=float) boxlist.add_field('scores', scores) self.assertTrue(np.allclose(scores, self.boxlist.get_field('scores'))) def test_add_multi_dimensional_field_data(self): boxlist = self.boxlist labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int) boxlist.add_field('labels', labels) self.assertTrue(np.allclose(labels, self.boxlist.get_field('labels'))) def test_get_extra_fields(self): boxlist = self.boxlist self.assertItemsEqual(boxlist.get_extra_fields(), []) scores = np.array([0.5, 0.7, 0.9], dtype=float) boxlist.add_field('scores', scores) 
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores']) labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int) boxlist.add_field('labels', labels) self.assertItemsEqual(boxlist.get_extra_fields(), ['scores', 'labels']) def test_get_coordinates(self): y_min, x_min, y_max, x_max = self.boxlist.get_coordinates() expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float) expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float) expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float) expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float) self.assertTrue(np.allclose(y_min, expected_y_min)) self.assertTrue(np.allclose(x_min, expected_x_min)) self.assertTrue(np.allclose(y_max, expected_y_max)) self.assertTrue(np.allclose(x_max, expected_x_max)) def test_num_boxes(self): boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float) boxlist = np_box_list.BoxList(boxes) expected_num_boxes = 2 self.assertEquals(boxlist.num_boxes(), expected_num_boxes) if __name__ == '__main__': tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/np_box_list_test.py
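The BoxList container exercised by these tests can be sketched as follows (hand-made boxes):

import numpy as np

from object_detection.utils import np_box_list

boxes = np.array([[0.0, 0.0, 10.0, 10.0],
                  [2.0, 2.0, 8.0, 8.0]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array([0.9, 0.4], dtype=float))

boxlist.num_boxes()         # 2
boxlist.get_extra_fields()  # ['scores']
y_min, x_min, y_max, x_max = boxlist.get_coordinates()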
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Library of common learning rate schedules."""

import numpy as np
import tensorflow as tf


def exponential_decay_with_burnin(global_step,
                                  learning_rate_base,
                                  learning_rate_decay_steps,
                                  learning_rate_decay_factor,
                                  burnin_learning_rate=0.0,
                                  burnin_steps=0,
                                  min_learning_rate=0.0,
                                  staircase=True):
  """Exponential decay schedule with burn-in period.

  In this schedule, learning rate is fixed at burnin_learning_rate
  for a fixed period, before transitioning to a regular exponential
  decay schedule.

  Args:
    global_step: int tensor representing global step.
    learning_rate_base: base learning rate.
    learning_rate_decay_steps: steps to take between decaying the learning
      rate.  Note that this includes the number of burn-in steps.
    learning_rate_decay_factor: multiplicative factor by which to decay
      learning rate.
    burnin_learning_rate: initial learning rate during burn-in period.  If
      0.0 (which is the default), then the burn-in learning rate is simply
      set to learning_rate_base.
    burnin_steps: number of steps to use burnin learning rate.
    min_learning_rate: the minimum learning rate.
    staircase: whether to use staircase decay.

  Returns:
    a (scalar) float tensor representing learning rate
  """
  if burnin_learning_rate == 0:
    burnin_learning_rate = learning_rate_base
  post_burnin_learning_rate = tf.train.exponential_decay(
      learning_rate_base,
      global_step - burnin_steps,
      learning_rate_decay_steps,
      learning_rate_decay_factor,
      staircase=staircase)
  return tf.maximum(tf.where(
      tf.less(tf.cast(global_step, tf.int32), tf.constant(burnin_steps)),
      tf.constant(burnin_learning_rate),
      post_burnin_learning_rate), min_learning_rate, name='learning_rate')


def cosine_decay_with_warmup(global_step,
                             learning_rate_base,
                             total_steps,
                             warmup_learning_rate=0.0,
                             warmup_steps=0,
                             hold_base_rate_steps=0):
  """Cosine decay schedule with warm up period.

  Cosine annealing learning rate as described in:
    Loshchilov and Hutter, SGDR: Stochastic Gradient Descent with Warm
    Restarts. ICLR 2017. https://arxiv.org/abs/1608.03983
  In this schedule, the learning rate grows linearly from warmup_learning_rate
  to learning_rate_base for warmup_steps, then transitions to a cosine decay
  schedule.

  Args:
    global_step: int64 (scalar) tensor representing global step.
    learning_rate_base: base learning rate.
    total_steps: total number of training steps.
    warmup_learning_rate: initial learning rate for warm up.
    warmup_steps: number of warmup steps.
    hold_base_rate_steps: Optional number of steps to hold base learning rate
      before decaying.

  Returns:
    a (scalar) float tensor representing learning rate.

  Raises:
    ValueError: if warmup_learning_rate is larger than learning_rate_base,
      or if warmup_steps is larger than total_steps.
  """
  if total_steps < warmup_steps:
    raise ValueError('total_steps must be larger or equal to '
                     'warmup_steps.')
  learning_rate = 0.5 * learning_rate_base * (1 + tf.cos(
      np.pi *
      (tf.cast(global_step, tf.float32) - warmup_steps - hold_base_rate_steps
      ) / float(total_steps - warmup_steps - hold_base_rate_steps)))
  if hold_base_rate_steps > 0:
    learning_rate = tf.where(global_step > warmup_steps + hold_base_rate_steps,
                             learning_rate, learning_rate_base)
  if warmup_steps > 0:
    if learning_rate_base < warmup_learning_rate:
      raise ValueError('learning_rate_base must be larger or equal to '
                       'warmup_learning_rate.')
    slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
    warmup_rate = slope * tf.cast(global_step,
                                  tf.float32) + warmup_learning_rate
    learning_rate = tf.where(global_step < warmup_steps, warmup_rate,
                             learning_rate)
  return tf.where(global_step > total_steps, 0.0, learning_rate,
                  name='learning_rate')


def manual_stepping(global_step, boundaries, rates, warmup=False):
  """Manually stepped learning rate schedule.

  This function provides fine grained control over learning rates.  One must
  specify a sequence of learning rates as well as a set of integer steps
  at which the current learning rate must transition to the next.  For
  example, if boundaries = [5, 10] and rates = [.1, .01, .001], then the
  learning rate returned by this function is .1 for global_step=0,...,4,
  .01 for global_step=5...9, and .001 for global_step=10 and onward.

  Args:
    global_step: int64 (scalar) tensor representing global step.
    boundaries: a list of global steps at which to switch learning
      rates.  This list is assumed to consist of increasing positive
      integers.
    rates: a list of (float) learning rates corresponding to intervals between
      the boundaries.  The length of this list must be exactly
      len(boundaries) + 1.
    warmup: Whether to linearly interpolate learning rate for steps in
      [0, boundaries[0]].

  Returns:
    a (scalar) float tensor representing learning rate

  Raises:
    ValueError: if one of the following checks fails:
      1. boundaries is a strictly increasing list of positive integers
      2. len(rates) == len(boundaries) + 1
      3. boundaries[0] != 0
  """
  if any([b < 0 for b in boundaries]) or any(
      [not isinstance(b, int) for b in boundaries]):
    raise ValueError('boundaries must be a list of positive integers')
  if any([bnext <= b for bnext, b in zip(boundaries[1:], boundaries[:-1])]):
    raise ValueError('Entries in boundaries must be strictly increasing.')
  if any([not isinstance(r, float) for r in rates]):
    raise ValueError('Learning rates must be floats')
  if len(rates) != len(boundaries) + 1:
    raise ValueError('Number of provided learning rates must exceed '
                     'number of boundary points by exactly 1.')
  if boundaries and boundaries[0] == 0:
    raise ValueError('First step cannot be zero.')
  if warmup and boundaries:
    slope = (rates[1] - rates[0]) * 1.0 / boundaries[0]
    # list() is required so that the concatenation with `boundaries` below
    # also works under Python 3, where range() returns a lazy range object.
    warmup_steps = list(range(boundaries[0]))
    warmup_rates = [rates[0] + slope * step for step in warmup_steps]
    boundaries = warmup_steps + boundaries
    rates = warmup_rates + rates[1:]
  else:
    boundaries = [0] + boundaries
  num_boundaries = len(boundaries)
  rate_index = tf.reduce_max(
      tf.where(tf.greater_equal(global_step, boundaries),
               list(range(num_boundaries)),
               [0] * num_boundaries))
  return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries),
                       name='learning_rate')
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/learning_schedules.py
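A minimal TF1-style sketch of manual_stepping, reproducing the example from its own docstring (boundaries=[5, 10], rates=[.1, .01, .001]); it assumes a graph/session environment, as used throughout this file:

import tensorflow as tf
from object_detection.utils import learning_schedules

global_step = tf.placeholder(tf.int64, [])
learning_rate = learning_schedules.manual_stepping(
    global_step, boundaries=[5, 10], rates=[0.1, 0.01, 0.001])

with tf.Session() as sess:
  for step in [0, 4, 5, 9, 10]:
    print(step, sess.run(learning_rate, feed_dict={global_step: step}))
    # Expected: 0.1 for steps 0-4, 0.01 for 5-9, 0.001 from step 10 onward.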
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python context management helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


class IdentityContextManager(object):
  """Returns an identity context manager that does nothing.

  This is helpful in setting up conditional `with` statement as below:

  with slim.arg_scope(x) if use_slim_scope else IdentityContextManager():
    do_stuff()
  """

  def __enter__(self):
    return None

  def __exit__(self, exec_type, exec_value, traceback):
    del exec_type
    del exec_value
    del traceback
    return False
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/context_manager.py
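A minimal sketch of the conditional-`with` pattern from the class docstring. Here `configure_logging` is a hypothetical stand-in context manager, not part of this library:

import contextlib
from object_detection.utils import context_manager

use_scope = False

@contextlib.contextmanager
def configure_logging():  # hypothetical context manager for illustration
  print('entering scope')
  yield
  print('leaving scope')

# Exactly one of the two context managers is entered; IdentityContextManager
# makes the `with` statement a no-op when the real scope is disabled.
with configure_logging() if use_scope else \
    context_manager.IdentityContextManager():
  print('do_stuff runs either way')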
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Bounding Box List operations for Numpy BoxLists.

Example box operations that are supported:
  * Areas: compute bounding box areas
  * IOU: pairwise intersection-over-union scores
"""
import numpy as np

from object_detection.utils import np_box_list
from object_detection.utils import np_box_ops


class SortOrder(object):
  """Enum class for sort order.

  Attributes:
    ascend: ascend order.
    descend: descend order.
  """
  ASCEND = 1
  DESCEND = 2


def area(boxlist):
  """Computes area of boxes.

  Args:
    boxlist: BoxList holding N boxes

  Returns:
    a numpy array with shape [N] representing box areas
  """
  y_min, x_min, y_max, x_max = boxlist.get_coordinates()
  return (y_max - y_min) * (x_max - x_min)


def intersection(boxlist1, boxlist2):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes

  Returns:
    a numpy array with shape [N, M] representing pairwise intersection area
  """
  return np_box_ops.intersection(boxlist1.get(), boxlist2.get())


def iou(boxlist1, boxlist2):
  """Computes pairwise intersection-over-union between box collections.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes

  Returns:
    a numpy array with shape [N, M] representing pairwise iou scores.
  """
  return np_box_ops.iou(boxlist1.get(), boxlist2.get())


def ioa(boxlist1, boxlist2):
  """Computes pairwise intersection-over-area between box collections.

  Intersection-over-area (ioa) between two boxes box1 and box2 is defined as
  their intersection area over box2's area.  Note that ioa is not symmetric,
  that is, IOA(box1, box2) != IOA(box2, box1).

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes

  Returns:
    a numpy array with shape [N, M] representing pairwise ioa scores.
  """
  return np_box_ops.ioa(boxlist1.get(), boxlist2.get())


def gather(boxlist, indices, fields=None):
  """Gather boxes from BoxList according to indices and return new BoxList.

  By default, gather returns boxes corresponding to the input index list, as
  well as all additional fields stored in the boxlist (indexing into the
  first dimension).  However one can optionally only gather from a
  subset of fields.

  Args:
    boxlist: BoxList holding N boxes
    indices: a 1-d numpy array of type int_
    fields: (optional) list of fields to also gather from.  If None (default),
      all fields are gathered from.  Pass an empty fields list to only gather
      the box coordinates.

  Returns:
    subboxlist: a BoxList corresponding to the subset of the input BoxList
      specified by indices

  Raises:
    ValueError: if specified field is not contained in boxlist or if the
      indices are not of type int_
  """
  if indices.size:
    if np.amax(indices) >= boxlist.num_boxes() or np.amin(indices) < 0:
      raise ValueError('indices are out of valid range.')
  subboxlist = np_box_list.BoxList(boxlist.get()[indices, :])
  if fields is None:
    fields = boxlist.get_extra_fields()
  for field in fields:
    extra_field_data = boxlist.get_field(field)
    subboxlist.add_field(field, extra_field_data[indices, ...])
  return subboxlist


def sort_by_field(boxlist, field, order=SortOrder.DESCEND):
  """Sort boxes and associated fields according to a scalar field.

  A common use case is reordering the boxes according to descending scores.

  Args:
    boxlist: BoxList holding N boxes.
    field: A BoxList field for sorting and reordering the BoxList.
    order: (Optional) 'descend' or 'ascend'. Default is descend.

  Returns:
    sorted_boxlist: A sorted BoxList with the field in the specified order.

  Raises:
    ValueError: if specified field does not exist or is not of single
      dimension.
    ValueError: if the order is not either descend or ascend.
  """
  if not boxlist.has_field(field):
    raise ValueError('Field ' + field + ' does not exist')
  if len(boxlist.get_field(field).shape) != 1:
    raise ValueError('Field ' + field + ' should be single dimension.')
  if order != SortOrder.DESCEND and order != SortOrder.ASCEND:
    raise ValueError('Invalid sort order')

  field_to_sort = boxlist.get_field(field)
  sorted_indices = np.argsort(field_to_sort)
  if order == SortOrder.DESCEND:
    sorted_indices = sorted_indices[::-1]
  return gather(boxlist, sorted_indices)


def non_max_suppression(boxlist,
                        max_output_size=10000,
                        iou_threshold=1.0,
                        score_threshold=-10.0):
  """Non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes.  In each iteration, the detected bounding box
  with highest score in the available pool is selected.

  Args:
    boxlist: BoxList holding N boxes.  Must contain a 'scores' field
      representing detection scores.  All scores belong to the same class.
    max_output_size: maximum number of retained boxes
    iou_threshold: intersection over union threshold.
    score_threshold: minimum score threshold.  Remove the boxes with scores
      less than this value.  Default value is set to -10, a very low
      threshold that passes pretty much all the boxes, unless the user sets
      a different score threshold.

  Returns:
    a BoxList holding M boxes where M <= max_output_size

  Raises:
    ValueError: if 'scores' field does not exist
    ValueError: if threshold is not in [0, 1]
    ValueError: if max_output_size < 0
  """
  if not boxlist.has_field('scores'):
    raise ValueError('Field scores does not exist')
  if iou_threshold < 0. or iou_threshold > 1.0:
    raise ValueError('IOU threshold must be in [0, 1]')
  if max_output_size < 0:
    raise ValueError('max_output_size must be nonnegative.')

  boxlist = filter_scores_greater_than(boxlist, score_threshold)
  if boxlist.num_boxes() == 0:
    return boxlist

  boxlist = sort_by_field(boxlist, 'scores')

  # Prevent further computation if NMS is disabled.
  if iou_threshold == 1.0:
    if boxlist.num_boxes() > max_output_size:
      selected_indices = np.arange(max_output_size)
      return gather(boxlist, selected_indices)
    else:
      return boxlist

  boxes = boxlist.get()
  num_boxes = boxlist.num_boxes()
  # is_index_valid is True only for boxes that have not yet been suppressed.
  is_index_valid = np.full(num_boxes, 1, dtype=bool)
  selected_indices = []
  num_output = 0
  for i in range(num_boxes):
    if num_output < max_output_size:
      if is_index_valid[i]:
        num_output += 1
        selected_indices.append(i)
        is_index_valid[i] = False
        valid_indices = np.where(is_index_valid)[0]
        if valid_indices.size == 0:
          break

        intersect_over_union = np_box_ops.iou(
            np.expand_dims(boxes[i, :], axis=0), boxes[valid_indices, :])
        intersect_over_union = np.squeeze(intersect_over_union, axis=0)
        is_index_valid[valid_indices] = np.logical_and(
            is_index_valid[valid_indices],
            intersect_over_union <= iou_threshold)
  return gather(boxlist, np.array(selected_indices))


def multi_class_non_max_suppression(boxlist, score_thresh, iou_thresh,
                                    max_output_size):
  """Multi-class version of non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes.  It operates independently for each class for
  which scores are provided (via the scores field of the input box_list),
  pruning boxes with score less than a provided threshold prior to
  applying NMS.

  Args:
    boxlist: BoxList holding N boxes.  Must contain a 'scores' field
      representing detection scores.  This scores field is a tensor that can
      be 1 dimensional (in the case of a single class) or 2-dimensional, in
      which case we assume that it takes the shape [num_boxes, num_classes].
      We further assume that this rank is known statically and that
      scores.shape[1] is also known (i.e., the number of classes is fixed
      and known at graph construction time).
    score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (boxes that have high IOU overlap
      with previously selected boxes are removed).
    max_output_size: maximum number of retained boxes per class.

  Returns:
    a BoxList holding M boxes with a rank-1 scores field representing
    corresponding scores for each box with scores sorted in decreasing order
    and a rank-1 classes field representing a class label for each box.

  Raises:
    ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not
      have a valid scores field.
  """
  if not 0 <= iou_thresh <= 1.0:
    raise ValueError('thresh must be between 0 and 1')
  if not isinstance(boxlist, np_box_list.BoxList):
    raise ValueError('boxlist must be a BoxList')
  if not boxlist.has_field('scores'):
    raise ValueError('input boxlist must have \'scores\' field')

  scores = boxlist.get_field('scores')
  if len(scores.shape) == 1:
    scores = np.reshape(scores, [-1, 1])
  elif len(scores.shape) == 2:
    if scores.shape[1] is None:
      raise ValueError('scores field must have statically defined second '
                       'dimension')
  else:
    raise ValueError('scores field must be of rank 1 or 2')
  num_boxes = boxlist.num_boxes()
  num_scores = scores.shape[0]
  num_classes = scores.shape[1]

  if num_boxes != num_scores:
    raise ValueError('Incorrect scores field length: actual vs expected.')

  selected_boxes_list = []
  for class_idx in range(num_classes):
    boxlist_and_class_scores = np_box_list.BoxList(boxlist.get())
    class_scores = np.reshape(scores[0:num_scores, class_idx], [-1])
    boxlist_and_class_scores.add_field('scores', class_scores)
    boxlist_filt = filter_scores_greater_than(boxlist_and_class_scores,
                                              score_thresh)
    nms_result = non_max_suppression(boxlist_filt,
                                     max_output_size=max_output_size,
                                     iou_threshold=iou_thresh,
                                     score_threshold=score_thresh)
    nms_result.add_field(
        'classes', np.zeros_like(nms_result.get_field('scores')) + class_idx)
    selected_boxes_list.append(nms_result)
  selected_boxes = concatenate(selected_boxes_list)
  sorted_boxes = sort_by_field(selected_boxes, 'scores')
  return sorted_boxes


def scale(boxlist, y_scale, x_scale):
  """Scale box coordinates in x and y dimensions.

  Args:
    boxlist: BoxList holding N boxes
    y_scale: float
    x_scale: float

  Returns:
    boxlist: BoxList holding N boxes
  """
  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  y_min = y_scale * y_min
  y_max = y_scale * y_max
  x_min = x_scale * x_min
  x_max = x_scale * x_max
  scaled_boxlist = np_box_list.BoxList(np.hstack([y_min, x_min, y_max, x_max]))

  fields = boxlist.get_extra_fields()
  for field in fields:
    extra_field_data = boxlist.get_field(field)
    scaled_boxlist.add_field(field, extra_field_data)

  return scaled_boxlist


def clip_to_window(boxlist, window):
  """Clip bounding boxes to a window.

  This op clips input bounding boxes (represented by bounding box corners) to
  a window, optionally filtering out boxes that do not overlap at all with
  the window.

  Args:
    boxlist: BoxList holding M_in boxes
    window: a numpy array of shape [4] representing the
      [y_min, x_min, y_max, x_max] window to which the op should clip boxes.

  Returns:
    a BoxList holding M_out boxes where M_out <= M_in
  """
  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  win_y_min = window[0]
  win_x_min = window[1]
  win_y_max = window[2]
  win_x_max = window[3]
  y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
  y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
  x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
  x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
  clipped = np_box_list.BoxList(
      np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped]))
  clipped = _copy_extra_fields(clipped, boxlist)
  areas = area(clipped)
  nonzero_area_indices = np.reshape(np.nonzero(np.greater(areas, 0.0)),
                                    [-1]).astype(np.int32)
  return gather(clipped, nonzero_area_indices)


def prune_non_overlapping_boxes(boxlist1, boxlist2, minoverlap=0.0):
  """Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.

  For each box in boxlist1, we want its IOA to be more than minoverlap with
  at least one of the boxes in boxlist2.  If it does not, we remove it.

  Args:
    boxlist1: BoxList holding N boxes.
    boxlist2: BoxList holding M boxes.
    minoverlap: Minimum required overlap between boxes, to count them as
      overlapping.

  Returns:
    A pruned boxlist with size [N', 4].
  """
  intersection_over_area = ioa(boxlist2, boxlist1)  # [M, N] tensor
  intersection_over_area = np.amax(intersection_over_area, axis=0)  # [N] tensor
  keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap))
  keep_inds = np.nonzero(keep_bool)[0]
  new_boxlist1 = gather(boxlist1, keep_inds)
  return new_boxlist1


def prune_outside_window(boxlist, window):
  """Prunes bounding boxes that fall outside a given window.

  This function prunes bounding boxes that even partially fall outside the
  given window.  See also clip_to_window, which only prunes bounding boxes
  that fall completely outside the window, and clips any bounding boxes that
  partially overflow.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a numpy array of size 4, representing [ymin, xmin, ymax, xmax]
      of the window.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding
      boxes in the input tensor.
  """
  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  win_y_min = window[0]
  win_x_min = window[1]
  win_y_max = window[2]
  win_x_max = window[3]
  coordinate_violations = np.hstack([np.less(y_min, win_y_min),
                                     np.less(x_min, win_x_min),
                                     np.greater(y_max, win_y_max),
                                     np.greater(x_max, win_x_max)])
  valid_indices = np.reshape(
      np.where(np.logical_not(np.max(coordinate_violations, axis=1))), [-1])
  return gather(boxlist, valid_indices), valid_indices


def concatenate(boxlists, fields=None):
  """Concatenate list of BoxLists.

  This op concatenates a list of input BoxLists into a larger BoxList.  It
  also handles concatenation of BoxList fields as long as the field tensor
  shapes are equal except for the first dimension.

  Args:
    boxlists: list of BoxList objects
    fields: optional list of fields to also concatenate.  By default, all
      fields from the first BoxList in the list are included in the
      concatenation.

  Returns:
    a BoxList with number of boxes equal to
      sum([boxlist.num_boxes() for boxlist in boxlists])

  Raises:
    ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
      contains non BoxList objects), or if requested fields are not contained
      in all boxlists
  """
  if not isinstance(boxlists, list):
    raise ValueError('boxlists should be a list')
  if not boxlists:
    raise ValueError('boxlists should have nonzero length')
  for boxlist in boxlists:
    if not isinstance(boxlist, np_box_list.BoxList):
      raise ValueError('all elements of boxlists should be BoxList objects')
  concatenated = np_box_list.BoxList(
      np.vstack([boxlist.get() for boxlist in boxlists]))
  if fields is None:
    fields = boxlists[0].get_extra_fields()
  for field in fields:
    first_field_shape = boxlists[0].get_field(field).shape
    first_field_shape = first_field_shape[1:]
    for boxlist in boxlists:
      if not boxlist.has_field(field):
        raise ValueError('boxlist must contain all requested fields')
      field_shape = boxlist.get_field(field).shape
      field_shape = field_shape[1:]
      if field_shape != first_field_shape:
        raise ValueError('field %s must have same shape for all boxlists '
                         'except for the 0th dimension.' % field)
    concatenated_field = np.concatenate(
        [boxlist.get_field(field) for boxlist in boxlists], axis=0)
    concatenated.add_field(field, concatenated_field)
  return concatenated


def filter_scores_greater_than(boxlist, thresh):
  """Filter to keep only boxes with score exceeding a given threshold.

  This op keeps the collection of boxes whose corresponding scores are
  greater than the input threshold.

  Args:
    boxlist: BoxList holding N boxes.  Must contain a 'scores' field
      representing detection scores.
    thresh: scalar threshold

  Returns:
    a BoxList holding M boxes where M <= N

  Raises:
    ValueError: if boxlist not a BoxList object or if it does not have a
      scores field
  """
  if not isinstance(boxlist, np_box_list.BoxList):
    raise ValueError('boxlist must be a BoxList')
  if not boxlist.has_field('scores'):
    raise ValueError('input boxlist must have \'scores\' field')
  scores = boxlist.get_field('scores')
  if len(scores.shape) > 2:
    raise ValueError('Scores should have rank 1 or 2')
  if len(scores.shape) == 2 and scores.shape[1] != 1:
    raise ValueError('Scores should have rank 1 or have shape '
                     'consistent with [None, 1]')
  high_score_indices = np.reshape(np.where(np.greater(scores, thresh)),
                                  [-1]).astype(np.int32)
  return gather(boxlist, high_score_indices)


def change_coordinate_frame(boxlist, window):
  """Change coordinate frame of the boxlist to be relative to window's frame.

  Given a window of the form [ymin, xmin, ymax, xmax], changes bounding box
  coordinates from boxlist to be relative to this window (e.g., the min
  corner maps to (0,0) and the max corner maps to (1,1)).

  An example use case is data augmentation: where we are given groundtruth
  boxes (boxlist) and would like to randomly crop the image to some window
  (window).  In this case we need to change the coordinate frame of each
  groundtruth box to be relative to this new window.

  Args:
    boxlist: A BoxList object holding N boxes.
    window: a size 4 1-D numpy array.

  Returns:
    a BoxList object with N boxes.
  """
  win_height = window[2] - window[0]
  win_width = window[3] - window[1]
  boxlist_new = scale(
      np_box_list.BoxList(boxlist.get() -
                          [window[0], window[1], window[0], window[1]]),
      1.0 / win_height, 1.0 / win_width)
  _copy_extra_fields(boxlist_new, boxlist)
  return boxlist_new


def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
  """Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.

  Args:
    boxlist_to_copy_to: BoxList to which extra fields are copied.
    boxlist_to_copy_from: BoxList from which fields are copied.

  Returns:
    boxlist_to_copy_to with extra fields.
  """
  for field in boxlist_to_copy_from.get_extra_fields():
    boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
  return boxlist_to_copy_to


def _update_valid_indices_by_removing_high_iou_boxes(
    selected_indices, is_index_valid, intersect_over_union, threshold):
  """Invalidates boxes whose max IOU with any selected box exceeds threshold."""
  max_iou = np.max(intersect_over_union[:, selected_indices], axis=1)
  return np.logical_and(is_index_valid, max_iou <= threshold)
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/np_box_list_ops.py
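A minimal sketch of single-class NMS with np_box_list_ops, assuming only the conventions used in the file above ([y_min, x_min, y_max, x_max] boxes plus a 'scores' field):

import numpy as np
from object_detection.utils import np_box_list
from object_detection.utils import np_box_list_ops

boxes = np.array([[0.0, 0.0, 1.0, 1.0],
                  [0.0, 0.1, 1.0, 1.1],   # heavy overlap with box 0 (IOU ~0.82)
                  [0.0, 2.0, 1.0, 3.0]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array([0.9, 0.8, 0.7], dtype=float))

kept = np_box_list_ops.non_max_suppression(
    boxlist, max_output_size=10, iou_threshold=0.5, score_threshold=0.0)
print(kept.get())                  # box 1 suppressed by higher-scoring box 0
print(kept.get_field('scores'))    # [0.9, 0.7]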
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Tests for object_detection.utils.learning_schedules."""
import numpy as np
import tensorflow as tf

from object_detection.utils import learning_schedules
from object_detection.utils import test_case


class LearningSchedulesTest(test_case.TestCase):

  def testExponentialDecayWithBurnin(self):
    def graph_fn(global_step):
      learning_rate_base = 1.0
      learning_rate_decay_steps = 3
      learning_rate_decay_factor = .1
      burnin_learning_rate = .5
      burnin_steps = 2
      min_learning_rate = .05
      learning_rate = learning_schedules.exponential_decay_with_burnin(
          global_step, learning_rate_base, learning_rate_decay_steps,
          learning_rate_decay_factor, burnin_learning_rate, burnin_steps,
          min_learning_rate)
      assert learning_rate.op.name.endswith('learning_rate')
      return (learning_rate,)

    output_rates = [
        self.execute(graph_fn, [np.array(i).astype(np.int64)])
        for i in range(9)
    ]
    exp_rates = [.5, .5, 1, 1, 1, .1, .1, .1, .05]
    self.assertAllClose(output_rates, exp_rates, rtol=1e-4)

  def testCosineDecayWithWarmup(self):
    def graph_fn(global_step):
      learning_rate_base = 1.0
      total_steps = 100
      warmup_learning_rate = 0.1
      warmup_steps = 9
      learning_rate = learning_schedules.cosine_decay_with_warmup(
          global_step, learning_rate_base, total_steps,
          warmup_learning_rate, warmup_steps)
      assert learning_rate.op.name.endswith('learning_rate')
      return (learning_rate,)

    exp_rates = [0.1, 0.5, 0.9, 1.0, 0]
    input_global_steps = [0, 4, 8, 9, 100]
    output_rates = [
        self.execute(graph_fn, [np.array(step).astype(np.int64)])
        for step in input_global_steps
    ]
    self.assertAllClose(output_rates, exp_rates)

  def testCosineDecayAfterTotalSteps(self):
    def graph_fn(global_step):
      learning_rate_base = 1.0
      total_steps = 100
      warmup_learning_rate = 0.1
      warmup_steps = 9
      learning_rate = learning_schedules.cosine_decay_with_warmup(
          global_step, learning_rate_base, total_steps,
          warmup_learning_rate, warmup_steps)
      assert learning_rate.op.name.endswith('learning_rate')
      return (learning_rate,)

    exp_rates = [0]
    input_global_steps = [101]
    output_rates = [
        self.execute(graph_fn, [np.array(step).astype(np.int64)])
        for step in input_global_steps
    ]
    self.assertAllClose(output_rates, exp_rates)

  def testCosineDecayWithHoldBaseLearningRateSteps(self):
    def graph_fn(global_step):
      learning_rate_base = 1.0
      total_steps = 120
      warmup_learning_rate = 0.1
      warmup_steps = 9
      hold_base_rate_steps = 20
      learning_rate = learning_schedules.cosine_decay_with_warmup(
          global_step, learning_rate_base, total_steps,
          warmup_learning_rate, warmup_steps, hold_base_rate_steps)
      assert learning_rate.op.name.endswith('learning_rate')
      return (learning_rate,)

    exp_rates = [0.1, 0.5, 0.9, 1.0, 1.0, 1.0, 0.999702, 0.874255, 0.577365,
                 0.0]
    input_global_steps = [0, 4, 8, 9, 10, 29, 30, 50, 70, 120]
    output_rates = [
        self.execute(graph_fn, [np.array(step).astype(np.int64)])
        for step in input_global_steps
    ]
    self.assertAllClose(output_rates, exp_rates)

  def testManualStepping(self):
    def graph_fn(global_step):
      boundaries = [2, 3, 7]
      rates = [1.0, 2.0, 3.0, 4.0]
      learning_rate = learning_schedules.manual_stepping(
          global_step, boundaries, rates)
      assert learning_rate.op.name.endswith('learning_rate')
      return (learning_rate,)

    output_rates = [
        self.execute(graph_fn, [np.array(i).astype(np.int64)])
        for i in range(10)
    ]
    exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
    self.assertAllClose(output_rates, exp_rates)

  def testManualSteppingWithWarmup(self):
    def graph_fn(global_step):
      boundaries = [4, 6, 8]
      rates = [0.02, 0.10, 0.01, 0.001]
      learning_rate = learning_schedules.manual_stepping(
          global_step, boundaries, rates, warmup=True)
      assert learning_rate.op.name.endswith('learning_rate')
      return (learning_rate,)

    output_rates = [
        self.execute(graph_fn, [np.array(i).astype(np.int64)])
        for i in range(9)
    ]
    exp_rates = [0.02, 0.04, 0.06, 0.08, 0.10, 0.10, 0.01, 0.01, 0.001]
    self.assertAllClose(output_rates, exp_rates)

  def testManualSteppingWithZeroBoundaries(self):
    def graph_fn(global_step):
      boundaries = []
      rates = [0.01]
      learning_rate = learning_schedules.manual_stepping(
          global_step, boundaries, rates)
      return (learning_rate,)

    output_rates = [
        self.execute(graph_fn, [np.array(i).astype(np.int64)])
        for i in range(4)
    ]
    exp_rates = [0.01] * 4
    self.assertAllClose(output_rates, exp_rates)


if __name__ == '__main__':
  tf.test.main()
DeepLearningExamples-master
TensorFlow/Detection/SSD/models/research/object_detection/utils/learning_schedules_test.py
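A minimal TF1-style sketch of cosine_decay_with_warmup, mirroring the expectations in testCosineDecayWithWarmup above; it assumes a graph/session environment:

import tensorflow as tf
from object_detection.utils import learning_schedules

global_step = tf.placeholder(tf.int64, [])
learning_rate = learning_schedules.cosine_decay_with_warmup(
    global_step, learning_rate_base=1.0, total_steps=100,
    warmup_learning_rate=0.1, warmup_steps=9)

with tf.Session() as sess:
  for step in [0, 4, 8, 9, 100]:
    print(step, sess.run(learning_rate, feed_dict={global_step: step}))
    # Expected (per the test): approximately 0.1, 0.5, 0.9, 1.0, 0.0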