# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
"""Root logger for export app."""
logger = logging.getLogger(__name__) # noqa
# import numpy as np
# import pytest
try:
import tensorrt as trt
except ImportError:
logger.warning(
"Failed to import TRT package. TRT inference testing will not be available."
)
trt = None
# from nvidia_tao_tf1.core.export._tensorrt import Engine, ONNXEngineBuilder
MNIST_ONNX_FILE = "./nvidia_tao_tf1/core/export/data/mnist.onnx"
class TestOnnx(object):
"""Test ONNX export to TensorRT."""
def test_parser(self):
"""Test parsing an ONNX model."""
trt_verbosity = trt.Logger.Severity.INFO
tensorrt_logger = trt.Logger(trt_verbosity)
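        # TensorRT 7+ requires ONNX models to be parsed into a network created
        # with the EXPLICIT_BATCH flag, as done below.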
explicit_batch = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(
tensorrt_logger
) as builder, builder.create_network(explicit_batch) as network:
with trt.OnnxParser(network, tensorrt_logger) as parser:
with open(MNIST_ONNX_FILE, "rb") as model:
parser.parse(model.read())
    # These tests are commented out until we can create an ONNX file with an
    # explicit batch dimension, as a workaround for failures with TRT 7.0
    # explicit batch.
# def test_engine_builder(self):
# """Test inference on an ONNX model."""
# builder = ONNXEngineBuilder(MNIST_ONNX_FILE, verbose=True)
# engine = Engine(builder.get_engine())
# output = engine.infer(np.zeros((2, 1, 28, 28)))
# assert output["Plus214_Output_0"].shape == (2, 10)
# def test_engine_builder_fp16(self):
# """Test inference on an ONNX model in FP16 mode."""
# try:
# builder = ONNXEngineBuilder(MNIST_ONNX_FILE, verbose=True, dtype="fp16")
# except AttributeError as e:
# if "FP16 but not supported" in str(e):
# pytest.skip("FP16 not supported on platform.")
# engine = Engine(builder.get_engine())
# output = engine.infer(np.zeros((2, 1, 28, 28)))
# assert output["Plus214_Output_0"].shape == (2, 10)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/test_onnx.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process and export quantized models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from struct import pack, unpack
import keras
from keras.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D
from keras.utils import CustomObjectScope
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.quantized_conv2dtranspose import QuantizedConv2DTranspose
from nvidia_tao_tf1.core.models.templates.quantized_depthwiseconv2d import QuantizedDepthwiseConv2D
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from nvidia_tao_tf1.cv.retinanet.initializers.prior_prob import PriorProbability
QAT_LAYER_MAPPING = {
QuantizedConv2D: Conv2D,
QuantizedConv2DTranspose: Conv2DTranspose,
QuantizedDepthwiseConv2D: DepthwiseConv2D
}
def check_for_quantized_layers(model):
"""Check Keras model for quantization layers."""
# syntax valid only under Python 3
qat_layers = [*QAT_LAYER_MAPPING.keys(), QDQ]
for layer in model.layers:
if type(layer) in qat_layers:
return True
return False
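# A minimal usage sketch (illustrative only; `model` stands for any
# QAT-trained Keras model): check for quantized layers and, if present, fold
# them away while collecting per-tensor scales for TensorRT INT8 deployment.
#
#   if check_for_quantized_layers(model):
#       model, tensor_scale_dict = process_quantized_layers(
#           model, "onnx", calib_cache="cal.bin", calib_json="cal.json"
#       )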
def process_quantized_layers(model, output_format, calib_cache=None, calib_json=None):
"""Remove QDQ, replace the quantized layer with non-QAT layer and extract calibration cache."""
network_dict = {"input_layers_of": {}, "new_output_tensor_of": {}}
# Set the input layers of each layer.
for layer in model.layers:
if len(layer._inbound_nodes) > 1:
inbound_layers_list = []
for i in range(len(layer._inbound_nodes)):
inbound_node = layer._inbound_nodes[i]
inbound_layers = [in_layer.name for in_layer in inbound_node.inbound_layers]
if len(inbound_layers) > 0:
inbound_layers_list += inbound_layers
network_dict["input_layers_of"].update({layer.name: sorted(inbound_layers_list)})
else:
inbound_node = layer._inbound_nodes[0]
inbound_layers = [in_layer.name for in_layer in inbound_node.inbound_layers]
if len(inbound_layers) > 0:
network_dict["input_layers_of"].update({layer.name: inbound_layers})
input_layers = [
l for l in model.layers if len(l._inbound_nodes[0].inbound_layers) == 0
]
assert len(input_layers) > 0, "No input layer was found."
assert len(input_layers) == len(
model.inputs
), "Number of input layers does not match number of input tensors."
for layer in input_layers:
input_tensor = layer._inbound_nodes[0].input_tensors[0]
assert input_tensor in model.inputs, "Input tensor not found in model inputs."
network_dict["new_output_tensor_of"].update({layer.name: input_tensor})
qdq_scale_dict = {}
for layer in model.layers:
if type(layer) == QDQ:
scaling_factor = layer.get_weights()
scaling_factor = scaling_factor[0]
prev_layer_name = network_dict["input_layers_of"][layer.name]
assert (
len(prev_layer_name) == 1
), "QDQ layer is expected to have only one input layer."
qdq_scale_dict[prev_layer_name[0]] = scaling_factor
for node in layer._outbound_nodes:
layer_name = node.outbound_layer.name
if type(node.outbound_layer) == QDQ:
raise AttributeError("Cascaded QDQ layers are not supported.")
idx = network_dict["input_layers_of"][layer_name].index(layer.name)
network_dict["input_layers_of"][layer_name][idx] = prev_layer_name[0]
output_tensors = []
tensor_scale_dict = {}
layer_count = {}
for layer in model.layers:
if layer.name not in network_dict["input_layers_of"]:
# It's an input layer.
if layer.name in qdq_scale_dict:
tensor_name = layer.output.name
# UFF exporter freezes the graph into a .pb file before exporting to UFF.
# As a result, the ":0", ":1", ... which indicates the output index of
# a TensorFlow OP in the output tensor name will be removed from the name
                # of the tensors. The ONNX exporter, by contrast, does not start
                # from a frozen graph, so the suffix is kept.
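                # For example, "conv2d_1/BiasAdd:0" becomes "conv2d_1/BiasAdd"
                # (layer name illustrative).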
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
tensor_scale_dict[tensor_name] = qdq_scale_dict[layer.name]
continue
if type(layer) == QDQ:
continue
# Determine input tensors.
layer_input = [
network_dict["new_output_tensor_of"][layer_aux]
for layer_aux in network_dict["input_layers_of"][layer.name]
]
if isinstance(layer_input[0], list):
layer_input = layer_input[0]
if len(layer_input) == 1:
layer_input = layer_input[0]
if type(layer) in QAT_LAYER_MAPPING:
x = layer_input
layer_config = layer.get_config()
layer_config.pop("bitwidth")
quantize_input = layer_config.pop("quantize")
new_layer = QAT_LAYER_MAPPING[type(layer)].from_config(layer_config)
if quantize_input:
if layer.use_bias:
kernels, biases, scaling_factor = layer.get_weights()
else:
kernels, scaling_factor = layer.get_weights()
assert (
scaling_factor.shape == ()
), "Unexpected shape for scaling factor parameter."
else:
if layer.use_bias:
kernels, biases = layer.get_weights()
else:
kernels = layer.get_weights()[0]
x = new_layer(x)
if layer.use_bias:
new_layer.set_weights([kernels, biases])
else:
new_layer.set_weights([kernels])
if (
quantize_input
and type(layer._inbound_nodes[0].inbound_layers[0]) != QDQ
):
tensor_name = layer.input.name
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
if tensor_name in tensor_scale_dict:
tensor_scale_dict[tensor_name] = max(
tensor_scale_dict[tensor_name], scaling_factor
)
else:
tensor_scale_dict[tensor_name] = scaling_factor
else:
weights = layer.get_weights()
layer_config = layer.get_config()
with CustomObjectScope({'PriorProbability': PriorProbability}):
new_layer = type(layer).from_config(layer_config)
if not isinstance(layer_input, list) or type(layer) in [
keras.layers.Add, keras.layers.Multiply, keras.layers.Concatenate
]:
x = new_layer(layer_input)
new_layer.set_weights(weights)
else:
if len(layer_input) > 1:
if len(network_dict["input_layers_of"][layer.name]) > 1:
x_list = []
for i in range(len(layer_input)):
x = new_layer(layer_input[i])
new_layer.set_weights(weights)
x_list.append(x)
x = x_list
else:
                        # To support RetinaNet subnets, where AnchorBox and
                        # Permute layers are shared across the five subnet
                        # levels (hence the modulo-5 counter below).
if network_dict["input_layers_of"][layer.name][0] in layer_count:
layer_count[network_dict["input_layers_of"][layer.name][0]] += 1
else:
layer_count[network_dict["input_layers_of"][layer.name][0]] = 0
layer_count[network_dict["input_layers_of"][layer.name][0]] %= 5
x = new_layer(
layer_input[
layer_count[network_dict["input_layers_of"][layer.name][0]]])
new_layer.set_weights(weights)
else:
raise ValueError("Model not supported!")
if layer.name in qdq_scale_dict:
tensor_name = layer.output.name
if output_format != "onnx":
tensor_name = tensor_name.split(":")[0]
tensor_scale_dict[tensor_name] = qdq_scale_dict[layer.name]
if len(layer._outbound_nodes) == 0:
output_tensors.append(x)
for node in layer._outbound_nodes:
outbound_layer = node.outbound_layer
if type(outbound_layer) == QDQ:
if len(outbound_layer._outbound_nodes) == 0:
output_tensors.append(x)
network_dict["new_output_tensor_of"].update({layer.name: x})
model = keras.models.Model(inputs=model.inputs, outputs=output_tensors)
if calib_cache is not None:
cal_cache_str = "1\n"
for tensor in tensor_scale_dict:
scaling_factor = tensor_scale_dict[tensor] / 127.0
cal_scale = hex(unpack("i", pack("f", scaling_factor))[0])
assert cal_scale.startswith("0x"), "Hex number expected to start with 0x."
cal_scale = cal_scale[2:]
cal_cache_str += tensor + ": " + cal_scale + "\n"
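        # The resulting cache is a plain-text file: a version line followed by
        # one "tensor-name: hex-encoded-float-scale" line per tensor, e.g.
        # (values illustrative):
        #   1
        #   conv2d_1/BiasAdd: 3c010204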
with open(expand_path(calib_cache), "w") as f:
f.write(cal_cache_str)
if calib_json is not None:
calib_json_data = {"tensor_scales": {}}
for tensor in tensor_scale_dict:
calib_json_data["tensor_scales"][tensor] = float(tensor_scale_dict[tensor])
with open(expand_path(calib_json), "w") as outfile:
json.dump(calib_json_data, outfile, indent=4)
return model, tensor_scale_dict
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/_quantized.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export application.
This module includes APIs to export a Keras model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import logging.config
import os
import sys
import tempfile
import time
import h5py
import keras
import numpy as np
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.core.export._tensorrt import keras_to_tensorrt
from nvidia_tao_tf1.core.export._uff import _reload_model_for_inference, keras_to_uff
from nvidia_tao_tf1.core.export.caffe import keras_to_caffe
from nvidia_tao_tf1.core.export.data import TensorFile
from nvidia_tao_tf1.core.utils.path_utils import expand_path
from third_party.keras.tensorflow_backend import limit_tensorflow_GPU_mem
if sys.version_info >= (3, 0):
from nvidia_tao_tf1.core.export._onnx import ( # pylint: disable=C0412
keras_to_onnx,
validate_onnx_inference,
)
"""Root logger for export app."""
logger = logging.getLogger(__name__)
def get_input_dims(tensor_filename):
"""Return sample input dimensions (excluding batch dimension)."""
with TensorFile(tensor_filename, "r") as data_file:
batch = data_file.read()
input_dims = np.array(batch).shape[1:]
return input_dims
def get_model_input_dtype(keras_hdf5_file):
"""Return input data type of a Keras model."""
with h5py.File(keras_hdf5_file, mode="r") as f:
model_config = f.attrs.get("model_config")
model_config = json.loads(model_config.decode("utf-8"))
input_layer_name = model_config["config"]["input_layers"][0][0]
layers = model_config["config"]["layers"]
input_layer = next(layer for layer in layers if layer["name"] == input_layer_name)
data_type = str(input_layer["config"]["dtype"])
if not data_type:
raise RuntimeError(
"Missing input layer data type in {}".format(keras_hdf5_file)
)
return data_type
def export_app(args):
"""Wrapper around export APIs.
Args:
args (dict): command-line arguments.
"""
# Limit TensorFlow GPU memory usage.
limit_tensorflow_GPU_mem(gpu_fraction=0.5)
start_time = time.time()
input_filename = args["input_file"]
output_filename = args["output_file"]
output_format = args["format"]
input_dims = args["input_dims"]
output_node_names = args["outputs"]
max_workspace_size = args["max_workspace_size"]
max_batch_size = args["max_batch_size"]
data_type = args["data_type"]
data_filename = args["data_file"]
calibration_cache_filename = args["cal_cache"]
batch_size = args["batch_size"]
batches = args["batches"]
fp32_layer_names = args["fp32_layer_names"]
fp16_layer_names = args["fp16_layer_names"]
parser = args["parser"]
random_data = args["random_data"]
verbose = args["verbose"]
custom_objects = args.get("custom_objects")
# Create list of exclude layers from command-line, if provided.
if fp32_layer_names is not None:
fp32_layer_names = fp32_layer_names.split(",")
if fp16_layer_names is not None:
fp16_layer_names = fp16_layer_names.split(",")
if output_filename and "/" in output_filename:
dirname = os.path.dirname(output_filename)
if not os.path.exists(expand_path(dirname)):
os.makedirs(expand_path(dirname))
# Set up logging.
verbosity = "DEBUG" if verbose else "INFO"
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", level=verbosity
)
logger.info("Loading model from %s", input_filename)
# Configure backend floatx according to the model input layer.
model_input_dtype = get_model_input_dtype(input_filename)
keras.backend.set_floatx(model_input_dtype)
keras.backend.set_learning_phase(0)
# Load model from disk.
model = keras.models.load_model(
input_filename, compile=False, custom_objects=custom_objects
)
tensor_scale_dict = None
if check_for_quantized_layers(model):
assert data_type != "int8", (
"Models with QuantizedConv2D layer are mixed-precision models."
" Set data_type to fp32 or fp16 for non-quantized layers."
" QuantizedConv2D layers will be handled automatically."
)
if calibration_cache_filename == "cal.bin":
calibration_cache_filename = input_filename + ".cal.bin"
calib_json_file = calibration_cache_filename + ".json"
model, tensor_scale_dict = process_quantized_layers(
model, output_format, calibration_cache_filename, calib_json_file
)
# Output node names may be explicitly specified if there are
# more than one output node. Otherwise, the output node will
# default to the last layer in the Keras model.
if output_node_names is not None:
output_node_names = output_node_names.split(",")
input_shapes = []
if output_format == "caffe":
if output_filename is None:
output_filename = input_filename
prototxt_filename = "%s.%s" % (output_filename, "prototxt")
model_filename = "%s.%s" % (output_filename, "caffemodel")
in_tensor_name, out_tensor_names = keras_to_caffe(
model,
prototxt_filename,
model_filename,
output_node_names=output_node_names,
)
logger.info("Exported model definition was saved into %s", prototxt_filename)
logger.info("Exported model weights were saved into %s", model_filename)
elif output_format == "uff":
if output_filename is None:
output_filename = "%s.%s" % (input_filename, "uff")
in_tensor_name, out_tensor_names, input_shapes = keras_to_uff(
model,
output_filename,
output_node_names=output_node_names,
custom_objects=custom_objects,
)
logger.info("Exported model was saved into %s", output_filename)
elif output_format == "onnx":
if sys.version_info < (3, 0):
raise ValueError(
"Exporting to onnx format is only supported under Python 3."
)
if output_node_names:
raise ValueError(
"Only exporting the entire keras model -> onnx is supported. Can't select"
"custom output layers"
)
if output_filename is None:
output_filename = "%s.%s" % (input_filename, "onnx")
model = _reload_model_for_inference(model, custom_objects=custom_objects)
in_tensor_name, out_tensor_names, input_shapes = keras_to_onnx(
model,
output_filename,
custom_objects=custom_objects,
target_opset=args["target_opset"],
)
success, error_str = validate_onnx_inference(
keras_model=model, onnx_model_file=output_filename
)
if not success:
logger.warning(
"Validation of model with onnx-runtime failed. Error:{}".format(
error_str
)
)
logger.info("Exported model was saved into %s", output_filename)
elif output_format == "tensorrt":
# Get input dimensions from data file if one was specified.
if data_filename is not None:
input_dims = get_input_dims(data_filename)
else:
# In the absence of a data file, get input dimensions from
# the command line.
if input_dims is None:
raise ValueError(
"Input dimensions must be specified for the export to "
"TensorRT format."
)
input_dims = [int(item) for item in input_dims.split(",")]
if random_data:
os_handle, data_filename = tempfile.mkstemp(suffix=".tensorfile")
os.close(os_handle)
with TensorFile(data_filename, "w") as f:
for _ in range(batches):
f.write(np.random.sample((batch_size,) + tuple(input_dims)))
if output_filename is None:
output_filename = "%s.%s" % (input_filename, "trt")
if data_type == "int8" and data_filename is None:
raise ValueError(
"A calibration data file must be provided for INT8 export."
)
in_tensor_name, out_tensor_names, engine = keras_to_tensorrt(
model,
input_dims,
output_node_names=output_node_names,
dtype=data_type,
max_workspace_size=max_workspace_size,
max_batch_size=max_batch_size,
calibration_data_filename=data_filename,
calibration_cache_filename=calibration_cache_filename,
calibration_n_batches=batches,
calibration_batch_size=batch_size,
fp32_layer_names=fp32_layer_names,
fp16_layer_names=fp16_layer_names,
parser=parser,
tensor_scale_dict=tensor_scale_dict,
)
# Infer some test images if a data file was specified. This will
# also print timing information if verbose mode was turned ON.
if data_filename is not None:
with TensorFile(data_filename, "r") as data_file:
data_generator = (data_file.read()[:batch_size] for _ in range(batches))
for _ in engine.infer_iterator(data_generator):
pass
if random_data and os.path.exists(expand_path(data_filename)):
os.remove(expand_path(data_filename))
engine.save(output_filename)
logger.info("Exported model was saved into %s", output_filename)
else:
raise ValueError("Unknown output format: %s" % output_format)
logger.info("Input node: %s", in_tensor_name)
logger.info("Output node(s): %s", out_tensor_names)
logger.debug("Done after %s seconds", time.time() - start_time)
return {
"inputs": in_tensor_name,
"outputs": out_tensor_names,
"input_shapes": input_shapes,
}
def add_parser_arguments(parser):
"""Adds the modulus export supported command line arguments to the given parser.
Args:
        parser (argparse.ArgumentParser): The parser to add the arguments to.
"""
# Positional arguments.
parser.add_argument("input_file", help="Input file (Keras .h5 or TensorRT .uff).")
# Optional arguments.
parser.add_argument(
"--batch_size",
type=int,
default=8,
help="Batch size to use for calibration and inference testing.",
)
parser.add_argument(
"--batches",
type=int,
default=10,
help="Number of batches to use for calibration and inference testing.",
)
parser.add_argument(
"--cal_cache", default="cal.bin", help="Calibration cache file to write to."
)
parser.add_argument(
"--data_type",
type=str,
default="fp32",
help="Data type to use for TensorRT export.",
choices=["fp32", "fp16", "int8"],
)
parser.add_argument(
"--data_file",
default=None,
help="TensorFile of data to use for calibration and inference testing.",
)
parser.add_argument(
"-f",
"--format",
type=str,
default="uff",
help="Output format",
choices=["caffe", "onnx", "uff", "tensorrt"],
)
parser.add_argument(
"--input_dims",
type=str,
default=None,
help="Comma-separated list of input dimensions. This is "
"needed for the export to TensorRT format. If a data file is "
"provided the input dimensions will be inferred from the file.",
)
parser.add_argument(
"--max_batch_size",
type=int,
default=16,
help="Maximum batch size of TensorRT engine in case of export to "
"TensorRT format.",
)
parser.add_argument(
"--max_workspace_size",
type=int,
default=(1 << 30),
help="Maximum workspace size of TensorRT engine in case of export to "
"TensorRT format.",
)
parser.add_argument(
"-o",
"--output_file",
type=str,
default=None,
help="Output file (defaults to $(input_filename).$(format)).",
)
parser.add_argument(
"--outputs",
type=str,
default=None,
help="Comma-separated list of output blob names.",
)
parser.add_argument(
"--fp32_layer_names",
type=str,
default=None,
help="Comma separated list of layers to be float32 precision.",
)
parser.add_argument(
"--fp16_layer_names",
type=str,
default=None,
help="Comma separated list of layers to be float16 precision.",
)
parser.add_argument(
"--parser",
type=str,
default="uff",
choices=["caffe", "onnx", "uff"],
help="Parser to use for intermediate model representation "
"in case of TensorRT export. Note, using onnx as parser is still under test,"
" please be aware of the risk.",
)
parser.add_argument(
"--random_data",
action="store_true",
help="Use random data during calibration and inference.",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Verbose messages."
)
parser.add_argument(
"--target_opset",
type=int,
default=None,
help="target_opset for ONNX converter. default=<default used by"
"the current keras2onnx package.",
)
def main(args=None):
"""Export application.
If MagLev was installed through ``pip`` then this application can be
run from a shell. For example::
$ maglev-export model.h5
See command-line help for more information.
Args:
args (list): Arguments to parse.
"""
if not args:
args = sys.argv[1:]
# Reduce TensorFlow verbosity.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
parser = argparse.ArgumentParser(
description="Export a MagLev model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
add_parser_arguments(parser)
args = vars(parser.parse_args(args))
export_app(args)
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
import keras
import numpy as np
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
process_quantized_layers,
)
from nvidia_tao_tf1.core.export._uff import _reload_model_for_inference
import onnx
if sys.version_info >= (3, 0):
import keras2onnx
import onnxruntime as rt
"""Logger for ONNX export APIs."""
logger = logging.getLogger(__name__)
def keras_to_onnx(model, output_filename, custom_objects=None, target_opset=None):
"""Export a Keras model to ONNX format.
Args:
model (Model): Keras model to export.
output_filename (str): file to write exported model to.
custom_objects (dict): dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization for export.
target_opset (int): Target opset version to use, default=<default opset for
the current keras2onnx installation>
Returns:
tuple<in_tensor_name(s), out_tensor_name(s), in_tensor_shape(s)>:
in_tensor_name(s): The name(s) of the input nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
out_tensor_name(s): The name(s) of the output nodes. If there is only one name, it will be
returned as a single string, otherwise a list of strings.
in_tensor_shape(s): The shape(s) of the input tensors for this network. If there is only
one input tensor, it will be returned as a single list<int>, otherwise
a list<list<int>>.
These must be passed to the TensorRT optimization tool to identify input and output blobs.
"""
if check_for_quantized_layers(model):
calib_json = output_filename + ".json"
model, _ = process_quantized_layers(model, "onnx", calib_json=calib_json)
model = _reload_model_for_inference(model, custom_objects=custom_objects)
onnx_model = keras2onnx.convert_keras(
model,
model.name,
custom_op_conversions=custom_objects,
target_opset=target_opset,
)
logger.debug("Model converted to ONNX, checking model validity with onnx.checker.")
# onnx.checker.check_model(onnx_model)
onnx.save_model(onnx_model, output_filename)
out_name = []
for keras_out in model.outputs:
for i in range(len(onnx_model.graph.output)):
name = onnx_model.graph.output[i].name
if keras_out._op.name in name or name in keras_out._op.name:
out_name.append(name)
break
out_name = out_name[0] if len(out_name) == 1 else out_name
in_name = []
in_shape = []
# ONNX graph inputs contain inputs that are not keras input layers.
for keras_in in model.inputs:
for i in range(len(onnx_model.graph.input)):
name = onnx_model.graph.input[i].name
            # The ONNX input names usually match the Keras input layer names
            # but may carry an extra suffix (e.g. "_01"), or vice versa.
if keras_in._op.name in name or name in keras_in._op.name:
in_name.append(name)
in_shape.append(keras.backend.int_shape(keras_in))
in_name = in_name[0] if len(in_name) == 1 else in_name
    in_shape = in_shape[0] if len(in_shape) == 1 else in_shape
return in_name, out_name, in_shape
def validate_onnx_inference(
keras_model: keras.models.Model, onnx_model_file: str, tolerance=1e-4
) -> (bool, str):
"""Validate onnx model with onnx runtime..
Args:
keras_model (Model): Loaded Keras model.
        onnx_model_file (str): ONNX model filepath.
        tolerance (float): Maximum allowed mean squared error between the
            Keras and ONNX Runtime predictions.
Returns:
Tuple (success, error_str)
success(bool): True for success, False for failure.
error_str(str): String which describes the error in case of failure.
"""
sess = rt.InferenceSession(onnx_model_file)
ort_inputs = sess.get_inputs()
ort_outputs = sess.get_outputs()
for i, (dim_onnx, dim_keras) in enumerate(
zip(ort_outputs[0].shape, keras_model.outputs[0].get_shape().as_list())
):
if dim_onnx is not None and dim_keras is not None:
if dim_onnx != dim_keras:
return (
False,
"The {} dim of onnx runtime parsed model does not match"
"the keras model. Onnx runtime model dims:{},"
"keras model dims:{}".format(
len(ort_outputs[0].shape) - i,
ort_outputs[0].shape,
keras_model.outputs[0].get_shape().as_list(),
),
)
# Sample inference run with onnxruntime to verify model validity.
# If the model is defined with a fixed batch size, pick that as
# the input batch_size of validating the model.
# For whatever reason, if batch dimension is not specified in the model,
    # the ONNX runtime returns it as the string `None` instead of Python's
    # built-in None.
if isinstance(ort_inputs[0].shape[0], int):
test_batch_size = ort_inputs[0].shape[0]
else:
test_batch_size = 8
model_inputs = [
np.random.uniform(size=([test_batch_size] + ort_input.shape[1:])).astype(
np.float32
)
for ort_input in ort_inputs
]
pred_onnx = sess.run(
[ort_output.name for ort_output in ort_outputs],
{ort_input.name: model_inputs[i] for i, ort_input in enumerate(ort_inputs)},
)
if list(pred_onnx[0].shape) != ([test_batch_size] + ort_outputs[0].shape[1:]):
return (
False,
"Onnx runtime prediction not of expected shape"
"Expected shape:{}, onnx runtime prediction shape:{}".format(
[test_batch_size] + ort_outputs[0].shape[1:], pred_onnx[0].shape
),
)
pred_keras = keras_model.predict(model_inputs, batch_size=test_batch_size)
# Check keras and ort predictions are close enough.
mse = sum(
[
np.square(pred_onnx[i] - pred_keras[i]).mean(axis=None)
for i in range(len(pred_onnx))
]
)
    if mse > tolerance:
return (
False,
"Onnx-runtime and keras model predictions differ"
" by mean squared error {}.".format(mse),
)
return True, ""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/_onnx.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus INT8 calibration APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from io import open # Python 2/3 compatibility. pylint: disable=W0622
import logging
import os
import sys
import tempfile
import traceback
import numpy as np
from nvidia_tao_tf1.core.decorators import override, subclass
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
from nvidia_tao_tf1.core.export._uff import keras_to_uff
from nvidia_tao_tf1.core.export.caffe import keras_to_caffe
from nvidia_tao_tf1.core.export.data import TensorFile
"""Logger for data export APIs."""
logger = logging.getLogger(__name__)
try:
import pycuda.autoinit # noqa pylint: disable=W0611
import pycuda.driver as cuda
import tensorrt as trt
except ImportError:
# TODO(xiangbok): we should probably do this test in modulus/export/__init__.py.
logger.warning(
"Failed to import TRT and/or CUDA. TensorRT optimization "
"and inference will not be available."
)
DEFAULT_MAX_WORKSPACE_SIZE = 1 << 30
DEFAULT_MAX_BATCH_SIZE = 100
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 100
# Array of TensorRT loggers. We need to keep global references to
# the TensorRT loggers that we create to prevent them from being
# garbage collected as those are referenced from C++ code without
# Python knowing about it.
tensorrt_loggers = []
# If we were unable to load TensorRT packages because TensorRT is not installed
# then we will stub the exported API.
if "trt" not in globals():
keras_to_tensorrt = None
load_tensorrt_engine = None
else:
# We were able to load TensorRT package so we are implementing the API
def _create_tensorrt_logger(verbose=False):
"""Create a TensorRT logger.
Args:
verbose (bool): whether to make the logger verbose.
"""
if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':
# Do not print any warnings in TLT docker
trt_verbosity = trt.Logger.Severity.ERROR
elif str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '0':
trt_verbosity = trt.Logger.Severity.INFO
elif verbose:
trt_verbosity = trt.Logger.Severity.VERBOSE
else:
trt_verbosity = trt.Logger.Severity.WARNING
tensorrt_logger = trt.Logger(trt_verbosity)
tensorrt_loggers.append(tensorrt_logger)
return tensorrt_logger
class Calibrator(trt.IInt8EntropyCalibrator2):
"""Calibrator class.
This inherits from ``trt.IInt8EntropyCalibrator2`` to implement
the calibration interface that TensorRT needs to calibrate the
INT8 quantization factors.
Args:
data_filename (str): ``TensorFile`` data file to use.
            cache_filename (str): Name of the calibration cache file to read from/write to.
            n_batches (int): Number of batches to calibrate over.
batch_size (int): Batch size to use for calibration (this must be
smaller or equal to the batch size of the provided data).
"""
def __init__(
self, data_filename, cache_filename, n_batches, batch_size, *args, **kwargs
):
"""Init routine."""
super(Calibrator, self).__init__(*args, **kwargs)
self._data_file = TensorFile(data_filename, "r")
self._cache_filename = cache_filename
self._batch_size = batch_size
self._n_batches = n_batches
self._batch_count = 0
self._data_mem = None
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
if self._batch_count < self._n_batches:
batch = np.array(self._data_file.read())
if batch is not None:
if batch.shape[0] < self._batch_size:
raise ValueError(
"Data file batch size (%d) < request batch size (%d)"
% (batch.shape[0], self._batch_size)
)
batch = batch[: self._batch_size]
if self._data_mem is None:
self._data_mem = cuda.mem_alloc(
batch.size * 4
) # 4 bytes per float32.
self._batch_count += 1
# Transfer input data to device.
cuda.memcpy_htod(
self._data_mem, np.ascontiguousarray(batch, dtype=np.float32)
)
return [int(self._data_mem)]
self._data_mem.free()
return None
def get_batch_size(self):
"""Return batch size."""
return self._batch_size
def read_calibration_cache(self):
"""Read calibration from file."""
logger.debug("read_calibration_cache - no-op")
if os.path.isfile(self._cache_filename):
logger.warning(
"Calibration file %s exists but is being "
"ignored." % self._cache_filename
)
def write_calibration_cache(self, cache):
"""Write calibration to file.
Args:
cache (memoryview): buffer to read calibration data from.
"""
logger.info(
"Saving calibration cache (size %d) to %s",
len(cache),
self._cache_filename,
)
with open(self._cache_filename, "wb") as f:
f.write(cache)
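    # A minimal sketch (paths hypothetical): calibrate from a TensorFile of
    # representative samples; TensorRT pulls batches via get_batch() during
    # engine building and the scales are written to "cal.bin" afterwards.
    #
    #   calibrator = Calibrator("calib_data.tensorfile", "cal.bin",
    #                           n_batches=10, batch_size=8)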
class Engine(object):
"""A class to represent a TensorRT engine.
This class provides utility functions for performing inference on
a TensorRT engine.
Args:
engine: CUDA engine to wrap.
forward_time_ema_decay (float): Decay factor for smoothing the calculation of
forward time. By default, no smoothing is applied.
"""
def __init__(self, engine, forward_time_ema_decay=0.0):
"""Initialization routine."""
self._engine = engine
self._context = None
self._forward_time = None
self._forward_time_ema_decay = forward_time_ema_decay
@contextlib.contextmanager
def _create_context(self):
"""Create an execution context and allocate input/output buffers."""
try:
with self._engine.create_execution_context() as self._context:
self._device_buffers = []
self._host_buffers = []
self._input_binding_ids = {}
max_batch_size = self._engine.max_batch_size
for i in range(self._engine.num_bindings):
shape = self._engine.get_binding_shape(i)
if len(shape) == 3:
size = trt.volume(shape)
elt_count = size * max_batch_size
output_shape = (max_batch_size, shape[0], shape[1], shape[2])
elif len(shape) == 4 and shape[0] not in [-1, None]:
# explicit batch
elt_count = shape[0] * shape[1] * shape[2] * shape[3]
output_shape = shape
elif len(shape) == 2:
elt_count = shape[0] * shape[1] * max_batch_size
output_shape = (max_batch_size, shape[0], shape[1])
elif len(shape) == 1:
elt_count = shape[0] * max_batch_size
output_shape = (max_batch_size, shape[0])
else:
raise ValueError("Unhandled shape: {}".format(str(shape)))
if self._engine.binding_is_input(i):
binding_name = self._engine.get_binding_name(i)
self._input_binding_ids[binding_name] = i
page_locked_mem = None
else:
page_locked_mem = cuda.pagelocked_empty(
elt_count, dtype=np.float32
)
page_locked_mem = page_locked_mem.reshape(*output_shape)
# Allocate pagelocked memory.
self._host_buffers.append(page_locked_mem)
self._device_buffers.append(
cuda.mem_alloc(elt_count * np.dtype(np.float32).itemsize)
)
if not self._input_binding_ids:
raise RuntimeError("No input bindings detected.")
# Create stream and events to measure timings.
self._stream = cuda.Stream()
self._start = cuda.Event()
self._end = cuda.Event()
yield
finally:
# Release context and allocated memory.
self._release_context()
def _do_infer(self, batch):
bindings = [int(device_buffer) for device_buffer in self._device_buffers]
if not isinstance(batch, dict):
if len(self._input_binding_ids) > 1:
raise ValueError(
"Input node names must be provided in case of multiple "
"inputs. "
"Got these inputs: %s" % self._input_binding_ids.keys()
)
# Single input case.
batch = {list(self._input_binding_ids.keys())[0]: batch}
batch_sizes = {array.shape[0] for array in batch.values()}
if len(batch_sizes) != 1:
raise ValueError(
"All arrays must have the same batch size. "
"Got %s." % repr(batch_sizes)
)
batch_size = batch_sizes.pop()
if (
self._engine.has_implicit_batch_dimension and
batch_size > self._engine.max_batch_size
):
raise ValueError(
"Batch size (%d) > max batch size (%d)"
% (batch_size, self._engine.max_batch_size)
)
# Transfer input data to device.
for node_name, array in batch.items():
array = array.astype("float32")
cuda.memcpy_htod_async(
self._device_buffers[self._input_binding_ids[node_name]],
array,
self._stream,
)
# Execute model.
self._start.record(self._stream)
if self._engine.has_implicit_batch_dimension:
# UFF
self._context.execute_async(batch_size, bindings, self._stream.handle, None)
else:
# ONNX
self._context.execute_async_v2(bindings, self._stream.handle, None)
self._end.record(self._stream)
self._end.synchronize()
elapsed_ms_per_batch = self._end.time_since(self._start)
elapsed_ms_per_sample = elapsed_ms_per_batch / batch_size
logger.debug(
"Elapsed time: %.3fms, %.4fms/sample.",
elapsed_ms_per_batch,
elapsed_ms_per_sample,
)
# CUDA time_since returns durations in milliseconds.
elapsed_time_per_sample = 1e-3 * elapsed_ms_per_sample
if self._forward_time is None:
self._forward_time = elapsed_time_per_sample
else:
a = self._forward_time_ema_decay
self._forward_time = (
1 - a
) * elapsed_time_per_sample + a * self._forward_time
# Transfer predictions back.
outputs = {}
for i in range(self._engine.num_bindings):
if not self._engine.binding_is_input(i):
# Using a synchronous memcpy here to ensure outputs are ready
# for consumption by caller upon returning from this call.
cuda.memcpy_dtoh(self._host_buffers[i], self._device_buffers[i])
out = self._host_buffers[i][:batch_size]
name = self._engine.get_binding_name(i)
outputs[name] = out
return outputs
def _release_context(self):
"""Release context and allocated memory."""
for device_buffer in self._device_buffers:
device_buffer.free()
del (device_buffer)
for host_buffer in self._host_buffers:
del (host_buffer)
del (self._start)
del (self._end)
del (self._stream)
def get_forward_time(self):
"""Return the inference duration.
The duration is calculated at the CUDA level and excludes
data loading and post-processing.
If a decay factor is specified in the constructor,
the returned value is smoothed with an exponential moving
average.
The returned value is expressed in seconds.
"""
return self._forward_time
def infer(self, batch):
"""Perform inference on a Numpy array.
Args:
batch (ndarray): array to perform inference on.
Returns:
A dictionary of outputs where keys are output names
and values are output tensors.
"""
with self._create_context():
outputs = self._do_infer(batch)
return outputs
def infer_iterator(self, iterator):
"""Perform inference on an iterator of Numpy arrays.
This method should be preferred to ``infer`` when performing
inference on multiple Numpy arrays since this will re-use
the allocated execution and memory.
Args:
iterator: an iterator that yields Numpy arrays.
Yields:
A dictionary of outputs where keys are output names
and values are output tensors, for each array returned
by the iterator.
Returns:
None.
"""
with self._create_context():
for batch in iterator:
outputs = self._do_infer(batch)
yield outputs
def save(self, filename):
"""Save serialized engine into specified file.
Args:
filename (str): name of file to save engine to.
"""
with open(filename, "wb") as outf:
outf.write(self._engine.serialize())
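    # Illustrative usage (shapes hypothetical): wrap a deserialized CUDA
    # engine and run a single batch through it.
    #
    #   engine = Engine(trt_engine)
    #   outputs = engine.infer(np.zeros((4, 3, 224, 224), dtype=np.float32))
    #   print(engine.get_forward_time())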
def _set_excluded_layer_precision(network, fp32_layer_names, fp16_layer_names):
"""When generating an INT8 model, it sets excluded layers' precision as fp32 or fp16.
In detail, this function is only used when generating INT8 TensorRT models. It accepts
two lists of layer names: (1). for the layers in fp32_layer_names, their precision will
be set as fp32; (2). for those in fp16_layer_names, their precision will be set as fp16.
Args:
network: TensorRT network object.
fp32_layer_names (list): List of layer names. These layers use fp32.
fp16_layer_names (list): List of layer names. These layers use fp16.
"""
is_mixed_precision = False
use_fp16_mode = False
for i, layer in enumerate(network):
if any(s in layer.name for s in fp32_layer_names):
is_mixed_precision = True
layer.precision = trt.float32
layer.set_output_type(0, trt.float32)
logger.info("fp32 index: %d; name: %s", i, layer.name)
elif any(s in layer.name for s in fp16_layer_names):
is_mixed_precision = True
use_fp16_mode = True
layer.precision = trt.float16
layer.set_output_type(0, trt.float16)
logger.info("fp16 index: %d; name: %s", i, layer.name)
else:
pass
# # To ensure int8 optimization is not done for shape layer
# if (not layer.get_output(0).is_shape_tensor):
# layer.precision = trt.int8
# layer.set_output_type(0, trt.int8)
return is_mixed_precision, use_fp16_mode
class EngineBuilder(object):
"""Create a TensorRT engine.
Args:
            filenames (list): List of filenames to load the model from.
            max_batch_size (int): Maximum batch size.
            max_workspace_size (int): Maximum workspace size.
dtype (str): data type ('fp32', 'fp16' or 'int8').
calibrator (:any:`Calibrator`): Calibrator to use for INT8 optimization.
fp32_layer_names (list): List of layer names. These layers use fp32.
fp16_layer_names (list): List of layer names. These layers use fp16.
verbose (bool): Whether to turn on verbose mode.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
strict_type(bool): Whether or not to apply strict_type_constraints for INT8 mode.
"""
def __init__(
self,
filenames,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,
dtype="fp32",
calibrator=None,
fp32_layer_names=None,
fp16_layer_names=None,
verbose=False,
tensor_scale_dict=None,
strict_type=False,
):
"""Initialization routine."""
if dtype == "int8":
self._dtype = trt.DataType.INT8
elif dtype == "fp16":
self._dtype = trt.DataType.HALF
elif dtype == "fp32":
self._dtype = trt.DataType.FLOAT
else:
raise ValueError("Unsupported data type: %s" % dtype)
self._strict_type = strict_type
if fp32_layer_names is None:
fp32_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP32 layer precision could be set only when dtype is INT8"
)
if fp16_layer_names is None:
fp16_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP16 layer precision could be set only when dtype is INT8"
)
self._fp32_layer_names = fp32_layer_names
self._fp16_layer_names = fp16_layer_names
self._tensorrt_logger = _create_tensorrt_logger(verbose)
builder = trt.Builder(self._tensorrt_logger)
config = builder.create_builder_config()
trt.init_libnvinfer_plugins(self._tensorrt_logger, "")
if self._dtype == trt.DataType.HALF and not builder.platform_has_fast_fp16:
logger.error("Specified FP16 but not supported on platform.")
raise AttributeError("Specified FP16 but not supported on platform.")
if self._dtype == trt.DataType.INT8 and not builder.platform_has_fast_int8:
logger.error("Specified INT8 but not supported on platform.")
raise AttributeError("Specified INT8 but not supported on platform.")
if self._dtype == trt.DataType.INT8:
if tensor_scale_dict is None and calibrator is None:
logger.error("Specified INT8 but neither calibrator "
"nor tensor_scale_dict is provided.")
raise AttributeError("Specified INT8 but no calibrator "
"or tensor_scale_dict is provided.")
network = builder.create_network()
self._load_from_files(filenames, network)
builder.max_batch_size = max_batch_size
config.max_workspace_size = max_workspace_size
if self._dtype == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
if self._dtype == trt.DataType.INT8:
config.set_flag(trt.BuilderFlag.INT8)
if tensor_scale_dict is None:
config.int8_calibrator = calibrator
                    # When using mixed precision, the TensorRT builder requires:
                    # strict_type_constraints to be True;
                    # fp16_mode to be True if any layer uses fp16 precision.
set_strict_types, set_fp16_mode = \
_set_excluded_layer_precision(
network=network,
fp32_layer_names=self._fp32_layer_names,
fp16_layer_names=self._fp16_layer_names,
)
if set_strict_types:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if set_fp16_mode:
config.set_flag(trt.BuilderFlag.FP16)
else:
# Discrete Volta GPUs don't have int8 tensor cores. So TensorRT might
# not pick int8 implementation over fp16 or even fp32 for V100
# GPUs found on data centers (e.g., AVDC). This will be a discrepancy
# compared to Turing GPUs including d-GPU of DDPX and also Xavier i-GPU
# both of which have int8 accelerators. We set the builder to strict
# mode to avoid picking higher precision implementation even if they are
# faster.
if self._strict_type:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
else:
config.set_flag(trt.BuilderFlag.FP16)
self._set_tensor_dynamic_ranges(
network=network, tensor_scale_dict=tensor_scale_dict
)
engine = builder.build_engine(network, config)
try:
assert engine
except AssertionError:
logger.error("Failed to create engine")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"Parsing failed on line {} in statement {}".format(line, text)
)
self._engine = engine
def _load_from_files(self, filenames, network):
"""Load an engine from files."""
raise NotImplementedError()
@staticmethod
def _set_tensor_dynamic_ranges(network, tensor_scale_dict):
"""Set the scaling factors obtained from quantization-aware training.
Args:
network: TensorRT network object.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
"""
tensors_found = []
for idx in range(network.num_inputs):
input_tensor = network.get_input(idx)
if input_tensor.name in tensor_scale_dict:
tensors_found.append(input_tensor.name)
cal_scale = tensor_scale_dict[input_tensor.name]
input_tensor.dynamic_range = (-cal_scale, cal_scale)
tensors_in_network = []
for layer in network:
found_all_outputs = True
for idx in range(layer.num_outputs):
output_tensor = layer.get_output(idx)
tensors_in_network.append(output_tensor.name)
if output_tensor.name in tensor_scale_dict:
tensors_found.append(output_tensor.name)
cal_scale = tensor_scale_dict[output_tensor.name]
output_tensor.dynamic_range = (-cal_scale, cal_scale)
else:
found_all_outputs = False
if found_all_outputs:
layer.precision = trt.int8
tensors_in_dict = tensor_scale_dict.keys()
if set(tensors_in_dict) != set(tensors_found):
logger.debug(
"Tensors in scale dictionary but not in network: {}".format(
set(tensors_in_dict) - set(tensors_found)
)
)
logger.debug(
"Tensors in the network but not in scale dictionary: {}".format(
set(tensors_in_network) - set(tensors_found)
)
)
def get_engine(self):
"""Return the engine that was built by the instance."""
return self._engine
@subclass
class CaffeEngineBuilder(EngineBuilder):
"""Create a TensorRT engine from Caffe proto and model files.
Args:
prototxt_filename (str): Caffe model definition.
caffemodel_filename (str): Caffe model snapshot.
input_node_name (str): Name of the input node.
input_dims (list): Dimensions of the input tensor.
output_node_names (list): Names of the output nodes.
"""
def __init__(
self,
prototxt_filename,
caffemodel_filename,
input_node_name,
input_dims,
output_node_names,
*args,
**kwargs
):
"""Init routine."""
self._input_node_name = input_node_name
if not isinstance(output_node_names, list):
output_node_names = [output_node_names]
self._output_node_names = output_node_names
self._input_dims = input_dims
super(CaffeEngineBuilder, self).__init__(
[prototxt_filename, caffemodel_filename], *args, **kwargs
)
@override
def _load_from_files(self, filenames, network):
"""Parse a Caffe model."""
parser = trt.CaffeParser()
prototxt_filename, caffemodel_filename = filenames
blob_name_to_tensor = parser.parse(
prototxt_filename, caffemodel_filename, network, trt.DataType.FLOAT
)
try:
assert blob_name_to_tensor
except AssertionError:
logger.error("Failed to parse caffe model")
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"Caffe parsing failed on line {} in statement {}".format(line, text)
)
# Mark the outputs.
for l in self._output_node_names:
logger.info("Marking " + l + " as output layer")
t = blob_name_to_tensor.find(str(l))
try:
assert t
except AssertionError:
logger.error("Failed to find output layer {}".format(l))
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"Caffe parsing failed on line {} in statement {}".format(
line, text
)
)
network.mark_output(t)
@subclass
class UFFEngineBuilder(EngineBuilder):
"""Create a TensorRT engine from a UFF file.
Args:
filename (str): UFF file to create engine from.
input_node_name (str): Name of the input node.
input_dims (list): Dimensions of the input tensor.
output_node_names (list): Names of the output nodes.
"""
def __init__(
self,
filename,
input_node_name,
input_dims,
output_node_names,
*args,
data_format="channels_first",
**kwargs
):
"""Init routine."""
self._input_node_name = input_node_name
if not isinstance(output_node_names, list):
output_node_names = [output_node_names]
self._output_node_names = output_node_names
self._input_dims = input_dims
self._data_format = data_format
super(UFFEngineBuilder, self).__init__([filename], *args, **kwargs)
@override
def _load_from_files(self, filenames, network):
filename = filenames[0]
parser = trt.UffParser()
for key, value in self._input_dims.items():
if self._data_format == "channels_first":
parser.register_input(key, value, trt.UffInputOrder(0))
else:
parser.register_input(key, value, trt.UffInputOrder(1))
for name in self._output_node_names:
parser.register_output(name)
try:
assert parser.parse(filename, network, trt.DataType.FLOAT)
except AssertionError:
logger.error("Failed to parse UFF File")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"UFF parsing failed on line {} in statement {}".format(line, text)
)
@subclass
class ONNXEngineBuilder(EngineBuilder):
"""Create a TensorRT engine from an ONNX file.
Args:
            filenames (str): ONNX file to create the engine from.
            max_batch_size (int): Maximum batch size (profile maximum when dynamic_batch is set).
            min_batch_size (int): Minimum batch size for the dynamic-batch optimization profile.
            max_workspace_size (int): Maximum workspace size.
            opt_batch_size (int): Optimal batch size for the dynamic-batch optimization profile.
            dtype (str): Data type ('fp32', 'fp16' or 'int8').
            calibrator (:any:`Calibrator`): Calibrator to use for INT8 optimization.
            fp32_layer_names (list): List of layer names. These layers use fp32.
            fp16_layer_names (list): List of layer names. These layers use fp16.
            verbose (bool): Whether to turn on verbose mode.
            tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
            dynamic_batch (bool): Whether to build the engine with a dynamic batch dimension.
            strict_type (bool): Whether or not to apply strict_type_constraints for INT8 mode.
            input_dims (dict): Optional mapping of input names to (C, H, W)
                dimensions, overriding the shapes recorded in the model.
"""
@override
def __init__(
self,
filenames,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
min_batch_size=DEFAULT_MIN_BATCH_SIZE,
max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,
opt_batch_size=DEFAULT_OPT_BATCH_SIZE,
dtype="fp32",
calibrator=None,
fp32_layer_names=None,
fp16_layer_names=None,
verbose=False,
tensor_scale_dict=None,
dynamic_batch=False,
strict_type=False,
input_dims=None,
):
"""Initialization routine."""
if dtype == "int8":
self._dtype = trt.DataType.INT8
elif dtype == "fp16":
self._dtype = trt.DataType.HALF
elif dtype == "fp32":
self._dtype = trt.DataType.FLOAT
else:
raise ValueError("Unsupported data type: %s" % dtype)
if fp32_layer_names is None:
fp32_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP32 layer precision could be set only when dtype is INT8"
)
if fp16_layer_names is None:
fp16_layer_names = []
elif dtype != "int8":
raise ValueError(
"FP16 layer precision could be set only when dtype is INT8"
)
self._fp32_layer_names = fp32_layer_names
self._fp16_layer_names = fp16_layer_names
self._strict_type = strict_type
self._tensorrt_logger = _create_tensorrt_logger(verbose)
builder = trt.Builder(self._tensorrt_logger)
if self._dtype == trt.DataType.HALF and not builder.platform_has_fast_fp16:
logger.error("Specified FP16 but not supported on platform.")
raise AttributeError("Specified FP16 but not supported on platform.")
if self._dtype == trt.DataType.INT8 and not builder.platform_has_fast_int8:
logger.error("Specified INT8 but not supported on platform.")
raise AttributeError("Specified INT8 but not supported on platform.")
if self._dtype == trt.DataType.INT8:
if tensor_scale_dict is None and calibrator is None:
logger.error("Specified INT8 but neither calibrator "
"nor tensor_scale_dict is provided.")
raise AttributeError("Specified INT8 but no calibrator "
"or tensor_scale_dict is provided.")
network = builder.create_network(
1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self._load_from_files([filenames], network)
config = builder.create_builder_config()
if dynamic_batch:
opt_profile = builder.create_optimization_profile()
model_input = network.get_input(0)
input_shape = model_input.shape
input_name = model_input.name
# If input_dims is provided, use this shape instead of model shape
# NOTE: This is to handle fully convolutional models with -1
# for height and width.
if input_dims is not None:
if input_name in input_dims.keys():
input_shape[1] = input_dims[input_name][0]
input_shape[2] = input_dims[input_name][1]
input_shape[3] = input_dims[input_name][2]
else:
raise ValueError("Input name not present in"
"the provided input_dims!")
real_shape_min = (min_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_opt = (opt_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_max = (max_batch_size, input_shape[1],
input_shape[2], input_shape[3])
opt_profile.set_shape(input=input_name,
min=real_shape_min,
opt=real_shape_opt,
max=real_shape_max)
config.add_optimization_profile(opt_profile)
config.max_workspace_size = max_workspace_size
if self._dtype == trt.DataType.HALF:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
if self._dtype == trt.DataType.INT8:
config.flags |= 1 << int(trt.BuilderFlag.INT8)
if tensor_scale_dict is None:
config.int8_calibrator = calibrator
                    # When using mixed precision, the TensorRT builder requires:
                    # strict_type_constraints to be True;
                    # fp16_mode to be True if any layer uses fp16 precision.
strict_type_constraints, fp16_mode = \
_set_excluded_layer_precision(
network=network,
fp32_layer_names=self._fp32_layer_names,
fp16_layer_names=self._fp16_layer_names,
)
if strict_type_constraints:
config.flags |= 1 << int(trt.BuilderFlag.STRICT_TYPES)
if fp16_mode:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
else:
# Discrete Volta GPUs don't have int8 tensor cores. So TensorRT might
# not pick int8 implementation over fp16 or even fp32 for V100
# GPUs found on data centers (e.g., AVDC). This will be a discrepancy
# compared to Turing GPUs including d-GPU of DDPX and also Xavier i-GPU
# both of which have int8 accelerators. We set the builder to strict
# mode to avoid picking higher precision implementation even if they are
# faster.
if self._strict_type:
config.flags |= 1 << int(trt.BuilderFlag.STRICT_TYPES)
else:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
self._set_tensor_dynamic_ranges(
network=network, tensor_scale_dict=tensor_scale_dict
)
engine = builder.build_engine(network, config)
try:
assert engine
except AssertionError:
logger.error("Failed to create engine")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
"Parsing failed on line {} in statement {}".format(line, text)
)
self._engine = engine
@override
def _load_from_files(self, filenames, network):
filename = filenames[0]
parser = trt.OnnxParser(network, self._tensorrt_logger)
with open(filename, "rb") as model_file:
ret = parser.parse(model_file.read())
for index in range(parser.num_errors):
print(parser.get_error(index))
assert ret, 'ONNX parser failed to parse the model.'
# Note: there might be an issue when running inference on TRT:
# [TensorRT] ERROR: Network must have at least one output.
# See https://github.com/NVIDIA/TensorRT/issues/183.
# Just keep a note in case we have this issue again.
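        # If that error resurfaces, a possible workaround (illustrative sketch,
        # not currently needed) is to mark the last layer's output explicitly
        # after parsing:
        #   last_layer = network.get_layer(network.num_layers - 1)
        #   network.mark_output(last_layer.get_output(0))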
def keras_to_tensorrt(
model,
input_dims,
output_node_names=None,
dtype="fp32",
max_workspace_size=DEFAULT_MAX_WORKSPACE_SIZE,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
calibration_data_filename=None,
calibration_cache_filename=None,
calibration_n_batches=16,
calibration_batch_size=16,
fp32_layer_names=None,
fp16_layer_names=None,
parser="uff",
verbose=False,
custom_objects=None,
tensor_scale_dict=None,
):
"""Create a TensorRT engine out of a Keras model.
NOTE: the current Keras session is cleared in this function.
Do not use this function during training.
Args:
model (Model): Keras model to export.
output_filename (str): File to write exported model to.
in_dims (list or dict): List of input dimensions, or a dictionary of
input_node_name:input_dims pairs in the case of multiple inputs.
output_node_names (list of str): List of model output node names as
returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
If not provided, then the last layer is assumed to be the output node.
max_workspace_size (int): Maximum TensorRT workspace size.
max_batch_size (int): Maximum TensorRT batch size.
calibration_data_filename (str): Calibratio data file to use.
calibration_cache_filename (str): Calibration cache file to write to.
calibration_n_batches (int): Number of calibration batches.
calibration_batch_size (int): Calibration batch size.
fp32_layer_names (list): Fp32 layers names. It is useful only when dtype is int8.
fp16_layer_names (list): Fp16 layers names. It is useful only when dtype is int8.
parser='uff' (str): Parser ('uff' or 'caffe') to use for intermediate representation.
verbose (bool): Whether to turn ON verbose messages.
custom_objects (dict): Dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization for export.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
Returns:
The names of the input and output nodes. These must be
passed to the TensorRT optimization tool to identify
input and output blobs. If multiple output nodes are specified,
then a list of output node names is returned.
"""
if dtype == "int8":
if calibration_data_filename is None:
raise ValueError(
"A calibration data file must be provided for INT8 export."
)
calibrator = Calibrator(
data_filename=calibration_data_filename,
cache_filename=calibration_cache_filename,
n_batches=calibration_n_batches,
batch_size=calibration_batch_size,
)
else:
calibrator = None
# Custom keras objects are only supported with UFF parser.
if custom_objects is not None:
assert (
parser == "uff"
), "Custom keras objects are only supported with UFF parser."
if parser == "uff":
# First, convert model to UFF.
os_handle, tmp_uff_filename = tempfile.mkstemp(suffix=".uff")
os.close(os_handle)
input_node_name, output_node_names, _ = keras_to_uff(
model,
tmp_uff_filename,
output_node_names,
custom_objects=custom_objects,
)
if not isinstance(input_dims, dict):
input_dims = {input_node_name: input_dims}
logger.info("Model output names: %s", str(output_node_names))
builder = UFFEngineBuilder(
tmp_uff_filename,
input_node_name,
input_dims,
output_node_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=dtype,
fp32_layer_names=fp32_layer_names,
fp16_layer_names=fp16_layer_names,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=tensor_scale_dict,
)
# Delete temp file.
os.remove(tmp_uff_filename)
elif parser == "caffe":
# First, convert to Caffe.
os_handle, tmp_proto_filename = tempfile.mkstemp(suffix=".prototxt")
os.close(os_handle)
os_handle, tmp_caffemodel_filename = tempfile.mkstemp(suffix=".caffemodel")
os.close(os_handle)
input_node_name, output_node_names = keras_to_caffe(
model, tmp_proto_filename, tmp_caffemodel_filename, output_node_names
)
builder = CaffeEngineBuilder(
tmp_proto_filename,
tmp_caffemodel_filename,
input_node_name,
input_dims,
output_node_names,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=dtype,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=tensor_scale_dict,
)
# Delete temp files.
os.remove(tmp_proto_filename)
os.remove(tmp_caffemodel_filename)
elif parser == "onnx":
# First, convert model to ONNX.
os_handle, tmp_onnx_filename = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
input_node_name, output_node_names, _ = keras_to_onnx(
model, tmp_onnx_filename, custom_objects=custom_objects,
target_opset=12
)
if not isinstance(input_dims, dict):
input_dims = {input_node_name: input_dims}
logger.info("Model output names: %s", str(output_node_names))
builder = ONNXEngineBuilder(
tmp_onnx_filename,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
dtype=dtype,
fp32_layer_names=fp32_layer_names,
fp16_layer_names=fp16_layer_names,
verbose=verbose,
calibrator=calibrator,
tensor_scale_dict=tensor_scale_dict,
)
# Delete temp file.
os.remove(tmp_onnx_filename)
else:
raise ValueError("Unknown parser: %s" % parser)
engine = Engine(builder.get_engine())
return input_node_name, output_node_names, engine
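# Illustrative usage sketch for keras_to_tensorrt (the model path, input shape
# and dtype below are hypothetical, not part of this module):
#
#   model = keras.models.load_model("my_model.hdf5")
#   in_name, out_names, engine = keras_to_tensorrt(
#       model, input_dims=(3, 224, 224), dtype="fp16", parser="onnx")
#   output = engine.infer(np.zeros((1, 3, 224, 224)))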
def load_tensorrt_engine(filename, verbose=False):
"""Load a serialized TensorRT engine.
Args:
filename (str): Path to the serialized engine.
verbose (bool): Whether to turn ON verbose mode.
"""
tensorrt_logger = _create_tensorrt_logger(verbose)
if not os.path.isfile(filename):
raise ValueError("File does not exist")
with trt.Runtime(tensorrt_logger) as runtime, open(filename, "rb") as inpf:
tensorrt_engine = runtime.deserialize_cuda_engine(inpf.read())
engine = Engine(tensorrt_engine)
return engine
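# Illustrative round trip (the file name is hypothetical): an engine built with
# keras_to_tensorrt can be serialized with Engine.save() and reloaded later:
#
#   engine.save("model.trt")
#   del engine
#   engine = load_tensorrt_engine("model.trt")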
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/_tensorrt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import logging
import h5py
import numpy as np
"""Logger for data export APIs."""
logger = logging.getLogger(__name__)
class TensorFile(io.RawIOBase):
"""Class to read/write multiple tensors to a file.
    The underlying implementation uses an HDF5 database
    to store data.
Note: this class does not support multiple writers to
the same file.
Args:
filename (str): path to file.
mode (str): mode to open file in.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
enforce_same_shape (bool): whether to enforce that all tensors be the same shape.
"""
DEFAULT_ARRAY_KEY = "_tensorfile_array_key_"
GROUP_NAME_PREFIX = "_tensorfile_array_key_"
def __init__(
self, filename, mode="a", enforce_same_shape=True, *args, **kwargs
): # pylint: disable=W1113
"""Init routine."""
super(TensorFile, self).__init__(*args, **kwargs)
logger.debug("Opening %s with mode=%s", filename, mode)
self._enforce_same_shape = enforce_same_shape
self._mode = mode
# Open or create the HDF5 file.
self._db = h5py.File(filename, mode)
if "count" not in self._db.attrs:
self._db.attrs["count"] = 0
if "r" in mode:
self._cursor = 0
else:
self._cursor = self._db.attrs["count"]
    @classmethod
    def _get_group_name(cls, cursor):
"""Return the name of the H5 dataset to create, given a cursor index."""
return "%s_%d" % (cls.GROUP_NAME_PREFIX, cursor)
def _write_data(self, group, data):
for key, value in data.items():
if isinstance(value, dict):
self._write_data(group.create_group(key), value)
elif isinstance(value, np.ndarray):
if self._enforce_same_shape:
if "shape" not in self._db.attrs:
self._db.attrs["shape"] = value.shape
else:
expected_shape = tuple(self._db.attrs["shape"].tolist())
if expected_shape != value.shape:
raise ValueError(
"Shape mismatch: %s v.s. %s"
% (str(expected_shape), str(value.shape))
)
group.create_dataset(key, data=value, compression="gzip")
else:
raise ValueError(
"Only np.ndarray or dicts can be written into a TensorFile."
)
def close(self):
"""Close this file."""
self._db.close()
# For python2.
def next(self):
"""Return next element."""
return self.__next__()
# For python3.
def __next__(self):
"""Return next element."""
if self._cursor < self._db.attrs["count"]:
return self.read()
raise StopIteration()
def _read_data(self, group):
if isinstance(group, h5py.Group):
data = {key: self._read_data(value) for key, value in group.items()}
else:
data = group[()]
return data
def read(self):
"""Read from current cursor.
Return array assigned to current cursor, or ``None`` to indicate
the end of the file.
"""
if not self.readable():
raise IOError("Instance is not readable.")
group_name = self._get_group_name(self._cursor)
if group_name in self._db:
self._cursor += 1
group = self._db[group_name]
data = self._read_data(group)
if list(data.keys()) == [self.DEFAULT_ARRAY_KEY]:
# The only key in this group is the default key.
# Return the numpy array directly.
return data[self.DEFAULT_ARRAY_KEY]
return data
return None
def readable(self):
"""Return whether this instance is readable."""
return self._mode in ["r", "r+", "a"]
def seekable(self):
"""Return whether this instance is seekable."""
return True
def seek(self, n):
"""Move cursor."""
self._cursor = min(n, self._db.attrs["count"])
return self._cursor
def tell(self):
"""Return current cursor index."""
return self._cursor
def truncate(self, n):
"""Truncation is not supported."""
raise IOError("Truncate operation is not supported.")
def writable(self):
"""Return whether this instance is writable."""
return self._mode in ["r+", "w", "w-", "a"]
def write(self, data):
"""Write a Numpy array or a dictionary of numpy arrays into file."""
if not self.writable():
raise IOError("Instance is not writable.")
if isinstance(data, np.ndarray):
data = {self.DEFAULT_ARRAY_KEY: data}
group_name = self._get_group_name(self._cursor)
# Delete existing instance of datasets at this cursor position.
if group_name in self._db:
del self._db[group_name]
group = self._db.create_group(group_name)
self._write_data(group, data)
self._cursor += 1
if self._cursor > self._db.attrs["count"]:
self._db.attrs["count"] = self._cursor
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import random
import sys
import tempfile
"""Root logger for export app."""
logger = logging.getLogger(__name__) # noqa
import keras
import mock
import numpy as np
try:
import pycuda.driver as cuda
except ImportError:
logger.warning(
"Failed to import CUDA package. TRT inference testing will not be available."
)
cuda = None
from nvidia_tao_tf1.core.export import (
keras_to_caffe,
keras_to_onnx,
keras_to_tensorrt,
keras_to_uff,
TensorFile
)
from nvidia_tao_tf1.core.export._tensorrt import _set_excluded_layer_precision
from nvidia_tao_tf1.core.export.app import get_model_input_dtype
from nvidia_tao_tf1.core.models.templates.conv_gru_2d_export import ConvGRU2DExport
from nvidia_tao_tf1.core.templates.helnet import HelNet
import nvidia_tao_tf1.core.utils
import pytest
import tensorflow as tf
try:
import tensorrt as trt
except ImportError:
logger.warning(
"Failed to import TRT package. TRT inference testing will not be available."
)
trt = None
import third_party.keras.tensorflow_backend
_onnx_supported = False
if sys.version_info >= (3, 0):
_onnx_supported = True
class TestModelExport(object):
"""Main class for model export tests."""
def common(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_transpose_conv=False,
add_reshape=False,
add_dropout=False,
add_dense=False,
add_maxpool2D=False,
add_concat_layer=False,
model_in_model=False,
multiple_outputs=False,
add_unnecessary_outputs=False,
intermediate_output=False,
dilation_rate=None,
add_conv_gru=False,
):
inputs = keras.layers.Input(shape=input_shape)
model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
# Custom keras layers to be exported.
custom_keras_layers = dict()
# Layers that have additional state input/output.
layers_with_state_io = []
if add_dropout:
x = model.outputs[0]
x = keras.layers.Dropout(rate=0.5)(x)
x = keras.layers.Conv2D(32, (3, 3))(x)
model = keras.models.Model(inputs=inputs, outputs=x)
if add_transpose_conv:
model = nvidia_tao_tf1.core.models.templates.utils.add_deconv_head(
model, inputs, nmaps=3, upsampling=4, data_format=data_format
)
if add_maxpool2D:
x = model.outputs[0]
x = keras.layers.MaxPooling2D()(x)
model = keras.models.Model(inputs=inputs, outputs=x)
if add_reshape:
x = model.outputs[0]
x = keras.layers.Reshape((-1, 16))(x)
model = keras.models.Model(inputs=inputs, outputs=x)
if add_dense:
x = model.outputs[0]
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(10, activation="tanh")(x)
model = keras.models.Model(inputs=inputs, outputs=x)
if add_concat_layer:
x = model.outputs[0]
# First branch.
num_filters_x1 = 4
x1 = keras.layers.Conv2D(
filters=num_filters_x1,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
data_format=data_format,
dilation_rate=(1, 1),
activation="sigmoid",
name="conv2d_x1",
)(x)
# Second branch.
num_filters_x2 = 2
x2 = keras.layers.Conv2D(
filters=num_filters_x2,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
dilation_rate=(1, 1),
activation="relu",
name="conv2d_x2",
)(x)
# Merge branches.
concat_axis = 1 if data_format == "channels_first" else -1
x = keras.layers.Concatenate(axis=concat_axis, name="concat")([x1, x2])
# Add extra layer on top.
x = keras.layers.Conv2D(
filters=num_filters_x1 + num_filters_x2,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
data_format=data_format,
activation="sigmoid",
name="conv2d_output",
)(x)
# One final layer.
x = keras.layers.Conv2D(
filters=1,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
data_format=data_format,
name="net_output",
)(x)
model = keras.models.Model(inputs=inputs, outputs=x)
if dilation_rate is not None:
x = model.outputs[0]
# Add a Conv2D layer with dilation.
y = keras.layers.Conv2D(
filters=1,
kernel_size=[1, 1],
strides=(1, 1),
dilation_rate=dilation_rate,
data_format=data_format,
)(x)
model = keras.models.Model(inputs=inputs, outputs=y)
if model_in_model:
outer_inputs = keras.layers.Input(shape=input_shape)
inner_model = model
model = keras.models.Model(
inputs=outer_inputs, outputs=inner_model(outer_inputs)
)
if add_conv_gru:
x = model.outputs[0]
# Add the conv GRU layer.
y = ConvGRU2DExport(
model_sequence_length_in_frames=1,
input_sequence_length_in_frames=1,
state_scaling=0.9,
input_shape=x.shape.as_list(),
initial_state_shape=x.shape.as_list(),
spatial_kernel_height=1,
spatial_kernel_width=1,
kernel_regularizer=None,
bias_regularizer=None,
is_stateful=True,
)(x)
model = keras.models.Model(inputs=inputs, outputs=y)
# Update custom layers dictionary for passing to the export code.
custom_keras_layers.update({"ConvGRU2DExport": ConvGRU2DExport})
# Add this layer as a state io layer.
layers_with_state_io.append(model.layers[-1])
if multiple_outputs:
y1 = model.outputs[0]
y2 = keras.layers.Conv2D(32, (3, 3))(y1)
outputs = [y1, y2]
if add_unnecessary_outputs:
# Add y3-y5 to the Keras model outputs. These should be ignored by the
# exporters as only y1 and y2 are added to output_node_names.
y3 = keras.layers.Conv2D(16, (3, 3))(y1)
y4 = keras.layers.Conv2D(16, (3, 3))(y2)
y5 = keras.layers.Conv2D(16, (3, 3))(y4)
outputs += [y3, y4, y5]
model = keras.models.Model(inputs=inputs, outputs=outputs)
keras_model_file = os.path.join(str(tmpdir), "model.hdf5")
with keras.utils.CustomObjectScope(custom_keras_layers):
model.save(keras_model_file)
keras.backend.clear_session()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.set_session(tf.Session(config=config))
model = keras.models.load_model(keras_model_file, custom_keras_layers)
if intermediate_output:
# Get a preceding layer's activation as the output.
output_layers = [model.layers[-4]]
else:
# Use keras model outputs, but only get up to two outputs to test exporting a
# model that has unnecessary extra outputs as well.
output_layers = model._output_layers[:2]
# Get output node names to be exported.
output_node_names = []
for layer in output_layers:
if export_format == "uff":
output_node_names.append(layer.get_output_at(0).name.split(":")[0])
elif export_format == "onnx":
# keras2onnx will drop the part after slash for output tensor names
output_node_names.append(layer.get_output_at(0).name.split("/")[0])
elif export_format == "caffe":
# Convert Tensor names to Caffe layer names.
if (
hasattr(layer, "activation")
and layer.activation.__name__ != "linear"
):
output_node_name = "%s/%s" % (
layer.name,
layer.activation.__name__.capitalize(),
)
else:
output_node_name = layer.name
output_node_names.append(output_node_name)
test_input_shape = (None,) + input_shape
# For the stateful layers only uff export is supported.
if export_format in ["uff", "onnx"]:
if layers_with_state_io:
test_input_shape = [test_input_shape]
for layer in layers_with_state_io:
if export_format == "uff":
stateful_node_name = layer.get_output_at(0).name.split(":")[0]
else:
stateful_node_name = layer.get_output_at(0).name
if stateful_node_name not in output_node_names:
output_node_names.append(stateful_node_name)
test_input_shape.append(layer.input_shape)
# Only specify output node names to exporter if we need to deviate from keras model outputs.
output_node_names_to_export = (
output_node_names
if add_unnecessary_outputs or intermediate_output
else None
)
if export_format == "uff":
uff_filename = os.path.join(str(tmpdir), "model.uff")
in_tensor_name, out_tensor_name, export_input_dims = keras_to_uff(
model,
uff_filename,
output_node_names=output_node_names_to_export,
custom_objects=custom_keras_layers,
)
assert os.path.isfile(uff_filename)
assert test_input_shape == export_input_dims
elif export_format == "onnx":
onnx_filename = os.path.join(str(tmpdir), "model.onnx")
in_tensor_name, out_tensor_name, export_input_dims = keras_to_onnx(
model, onnx_filename, custom_objects=custom_keras_layers
)
assert os.path.isfile(onnx_filename)
# onnx model has explicit batch size, hence dim[0] cannot match that in Keras
assert list(test_input_shape)[1:] == list(export_input_dims)[1:]
elif export_format == "caffe":
proto_filename = os.path.join(str(tmpdir), "model.proto")
snapshot_filename = os.path.join(str(tmpdir), "model.caffemodel")
in_tensor_name, out_tensor_name = keras_to_caffe(
model,
proto_filename,
snapshot_filename,
output_node_names=output_node_names_to_export,
)
assert os.path.isfile(proto_filename)
assert os.path.isfile(snapshot_filename)
# TensorRT requires all input_layers to be 4-dimensional.
# This check ensures that the caffe model can be converted to TensorRT.
assert all(
[
len(input_layer.input_shape) == 4
for input_layer in model._input_layers
]
)
else:
raise ValueError("Unknown format: %s" % export_format)
# For the stateful layers only uff export is supported.
if layers_with_state_io and export_format == "uff":
assert in_tensor_name == [
model.layers[0].get_output_at(0).name.split(":")[0]
] + [layer.state_input_name for layer in layers_with_state_io]
elif export_format == "onnx":
assert in_tensor_name == model.layers[0].get_output_at(0).name.split(":")[0]
else:
# Make sure input/output tensor names were returned correctly.
assert in_tensor_name == model.layers[0].get_output_at(0).name.split(":")[0]
# Exporter gives a list of output names only if there are multiple outputs.
if len(output_node_names) == 1:
output_node_names = output_node_names[0]
if model_in_model and export_format in ["uff"]:
# In the case of a model-in-model architecture, output tensor names
# are registered in the namespace of the inner model.
output_node_names = "{}/{}".format(inner_model.name, output_node_names)
elif model_in_model and export_format in ["onnx"]:
output_node_names = inner_model.name
if isinstance(out_tensor_name, list):
for idx in range(len(out_tensor_name)):
assert out_tensor_name[idx] in output_node_names[idx]
else:
assert out_tensor_name in output_node_names
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 256, 256), "uff"),
(HelNet, 6, "channels_first", False, (3, 256, 256), "uff"),
(HelNet, 10, "channels_last", True, (64, 64, 3), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 128), "caffe"),
(HelNet, 10, "channels_first", False, (3, 256, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 128), "onnx"),
(HelNet, 10, "channels_first", False, (3, 256, 256), "onnx"),
],
)
def test_export(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model exports to the destination format."""
if export_format == "onnx" and not _onnx_supported:
return
keras.layers.Input(shape=input_shape)
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_with_conv_transpose_head(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model exports to PB and UFF."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_transpose_conv=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_with_reshape(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model exports to PB and UFF."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_reshape=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_with_dropout(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model with dropout exports to PB and UFF."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_dropout=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_with_dense(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model with dense layer exports to PB and UFF."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_dense=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_with_branches(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model with branches and a concatenation layer exports to Caffe and UFF."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_concat_layer=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_with_maxpool2D(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model with 2D max pooling exports to PB and UFF."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_maxpool2D=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape, "
"dilation_rate, export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), (2, 2), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), (2, 2), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), (2, 2), "onnx"),
],
)
def test_with_dilation(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
dilation_rate,
export_format,
):
"""Test that our model with dilation exports to PB and UFF."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
dilation_rate=dilation_rate,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_model_in_model(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that we can export a model-in-model type of architecture."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
model_in_model=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_multiple_outputs(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that we can export a model with multiple outputs."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
multiple_outputs=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
# TODO(bhupinders): Right now ONNX cannot pick and chose output nodes to
# export, it has to export the entire keras model.
# (HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_unnecessary_outputs(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that we can export only given output nodes, ignoring other outputs."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
multiple_outputs=True,
add_unnecessary_outputs=True,
)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), "caffe"),
# TODO(bhupinders): ONNX converter can't manipulate/extract intermediate
# outputs.
# (HelNet, 6, "channels_first", True, (3, 128, 256), "onnx"),
],
)
def test_intermediate_output(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that we can export subgraphs of models."""
if export_format == "onnx" and not _onnx_supported:
return
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
intermediate_output=True,
)
# @pytest.mark.parametrize(
# "output_fn, model, nlayers, data_format, use_batch_norm, input_shape,"
# "output_format",
# [
# ("default", HelNet, 6, "channels_first", False, (3, 256, 256), "uff"),
# ("default", HelNet, 6, "channels_first", False, (3, 256, 256), "onnx"),
# ],
# )
# @pytest.mark.script_launch_mode("subprocess")
# def test_export_app(
# self,
# script_runner,
# tmpdir,
# output_fn,
# model,
# nlayers,
# data_format,
# use_batch_norm,
# input_shape,
# output_format,
# ):
# """Test the export application.
# Just make sure a model file is generated.
# """
# if output_format == "onnx" and not _onnx_supported:
# return
# model_filename = os.path.join(str(tmpdir), "model.h5")
# if output_fn == "default":
# extra_args = []
# suffix = ".%s" % output_format
# output_filename = os.path.join(str(tmpdir), "model.h5" + suffix)
# else:
# output_filename = os.path.join(str(tmpdir), output_fn)
# extra_args = ["--output", output_filename]
# extra_args.extend(["--format", output_format])
# inputs = keras.layers.Input(shape=input_shape)
# model = model(
# nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
# )
# model.save(model_filename)
# env = os.environ.copy()
# # If empty, then "import pycuda.autoinit" will not work saying
# # "pycuda._driver.RuntimeError: cuInit failed: no CUDA-capable device is detected"
# # This is inside a protected "try" in tensorrt/lite/engine.py, so you'll only see an error
# # saying to make sure pycuda is installed, which is wrong.
# # TODO(xiangbok): restore to no devices
# env["CUDA_VISIBLE_DEVICES"] = "0"
# script = "app.py"
# # Path adjustment for bazel tests
# if os.path.exists(os.path.join("nvidia_tao_tf1/core/export", script)):
# script = os.path.join("nvidia_tao_tf1/core/export", script)
# ret = script_runner.run(script, model_filename, env=env, *extra_args)
# assert ret.success, "Process returned error: %s error trace: %s" % (
# ret.success,
# ret.stderr,
# )
# assert os.path.isfile(output_filename)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape," "export_format",
[(HelNet, 6, "channels_first", True, (3, 128, 256), "uff")],
)
def test_with_conv_gru_head(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
):
"""Test that our model exports to PB and UFF."""
self.common(
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
export_format,
add_conv_gru=True,
)
class TestDataExport(object):
"""A class to test exporting tensors to a file."""
@pytest.mark.parametrize(
"nbatches, batch_size, input_shape, dtype, bit_depth",
[(10, 6, (3, 64, 64), np.float32, 4)],
)
def test(self, tmpdir, nbatches, batch_size, input_shape, dtype, bit_depth):
"""Main test.
Args:
tmpdir (dir object): temp dir provided by pytest fixture.
nbatches (int): number of batches to use.
batch_size (int): number of samples per batch.
input_shape (list): shape of each sample.
dtype (dtype): data type to use.
bit_depth (int): size (in bytes) of each element.
"""
filename = os.path.join(str(tmpdir), "tensors.h5")
batch_shape = (batch_size,) + input_shape
dataset_shape = (nbatches,) + batch_shape
dataset = np.random.random_sample(dataset_shape).astype(dtype)
# Fill some batches with constants to test compression.
dataset[0].fill(0)
dataset[-1].fill(-1)
# Dump data to file.
with TensorFile(filename, "w") as f:
for i in range(nbatches):
f.write(dataset[i])
assert os.path.isfile(filename)
# Make sure some sort of compression happened.
file_size = os.path.getsize(filename)
assert file_size < dataset.size * bit_depth
# Read back through sequential accesses.
with TensorFile(filename, "r") as f:
for batch_index, batch in enumerate(f):
assert batch.dtype == dtype
assert np.allclose(batch, dataset[batch_index])
# Read back through random accesses.
with TensorFile(filename, "r") as f:
indices = list(range(nbatches))
random.shuffle(indices)
for idx in indices:
f.seek(idx)
batch = f.read()
assert batch.dtype == dtype
assert np.allclose(batch, dataset[idx])
@pytest.mark.parametrize(
"nbatches, batch_size, keys, input_shapes, dtype",
[(4, 3, ["o1", "o2"], [(1, 24, 48), (4, 24, 48)], np.float32)],
)
def test_dict(self, tmpdir, nbatches, batch_size, keys, input_shapes, dtype):
"""Test that we can read/write dictionaries of numpy tensors.
Args:
tmpdir (dir object): temp dir provided by pytest fixture.
nbatches (int): number of batches to use.
batch_size (int): number of samples per batch.
keys (list): list of dictionary keys.
input_shapes (list): list of shapes.
dtype (dtype): data type to use.
"""
filename = os.path.join(str(tmpdir), "tensors.h5")
batch_shapes = [(batch_size,) + input_shape for input_shape in input_shapes]
data_shapes = [(nbatches,) + batch_shape for batch_shape in batch_shapes]
datasets = [
np.random.random_sample(shape).astype(dtype) for shape in data_shapes
]
# Dump data to file.
with TensorFile(filename, "w", enforce_same_shape=False) as f:
for i in range(nbatches):
batch = {}
for idx, key in enumerate(keys):
batch[key] = datasets[idx][i]
f.write(batch)
assert os.path.isfile(filename)
# Read back through sequential accesses.
with TensorFile(filename, "r", enforce_same_shape=False) as f:
for batch_index, batch in enumerate(f):
assert isinstance(batch, dict)
for key_index, key in enumerate(keys):
assert batch[key].dtype == dtype
assert np.allclose(batch[key], datasets[key_index][batch_index])
# Read back through random accesses.
with TensorFile(filename, "r", enforce_same_shape=False) as f:
indices = list(range(nbatches))
random.shuffle(indices)
for batch_index in indices:
f.seek(batch_index)
batch = f.read()
assert isinstance(batch, dict)
for key_index, key in enumerate(keys):
assert batch[key].dtype == dtype
assert np.allclose(batch[key], datasets[key_index][batch_index])
@pytest.mark.parametrize("shape, dtype", [((1, 24, 48), np.float32)])
def test_nested_dict(self, tmpdir, shape, dtype):
"""Test that we can read/write nested dictionaries of numpy tensors.
Args:
tmpdir (dir object): temp dir provided by pytest fixture.
shape (list): tensor shape.
dtype (dtype): data type to use.
"""
filename = os.path.join(str(tmpdir), "tensors.h5")
batch = {
"c1": {
"o1": np.random.random_sample(shape).astype(dtype),
"o2": np.random.random_sample(shape).astype(dtype),
},
"o1": np.random.random_sample(shape).astype(dtype),
}
# Dump data to file.
with TensorFile(filename, "w") as f:
f.write(batch)
assert os.path.isfile(filename)
# Read back through sequential accesses.
with TensorFile(filename, "r") as f:
read_batch = f.read()
assert "o1" in read_batch
assert np.allclose(batch["o1"], read_batch["o1"])
assert "c1" in read_batch
assert "o1" in read_batch["c1"]
assert np.allclose(batch["c1"]["o1"], read_batch["c1"]["o1"])
assert "o2" in read_batch["c1"]
assert np.allclose(batch["c1"]["o2"], read_batch["c1"]["o2"])
@pytest.mark.parametrize(
"batch_sizes, input_shape, dtype, enforce_same_shape",
[
([8, 8, 16, 8], (1, 28, 28), np.float32, True),
([8, 8, 16, 8], (1, 28, 28), np.float32, False),
],
)
def test_enforce_same_shape(
self, tmpdir, batch_sizes, input_shape, dtype, enforce_same_shape
):
"""Test shape enforcement.
Args:
tmpdir (dir object): temp dir provided by pytest fixture.
batch_sizes (list): list of batch sizes.
input_shape (list): shape of each sample.
dtype (dtype): data type to use.
enforce_same_shape (bool): whether to enforce same shape.
"""
filename = os.path.join(str(tmpdir), "tensors.h5")
# Dump data to file.
with TensorFile(
filename, "w", enforce_same_shape=enforce_same_shape
) as f:
prev_batch_size = batch_sizes[0]
for batch_size in batch_sizes:
batch_shape = (batch_size,) + input_shape
batch = np.random.random_sample(batch_shape).astype(dtype)
if enforce_same_shape and batch_size != prev_batch_size:
with pytest.raises(ValueError):
f.write(batch)
else:
f.write(batch)
    def test_read_nonexistent_file(self, tmpdir):
        """Test that an error is raised when trying to open a nonexistent file."""
filename = os.path.join(str(tmpdir), "tensors.h5")
with pytest.raises(IOError):
with TensorFile(filename, "r"):
pass
def test_read_error(self, tmpdir):
"""Test that an error is dropped when trying to read from a write-only file."""
filename = os.path.join(str(tmpdir), "tensors.h5")
# Create the file.
with TensorFile(filename, "w"):
pass
# Open for writing and try to read.
with pytest.raises(IOError):
with TensorFile(filename, "w") as f:
f.read()
def test_write_error(self, tmpdir):
"""Test that an error is dropped when trying to write to a read-only file."""
filename = os.path.join(str(tmpdir), "tensors.h5")
# Create the file.
with TensorFile(filename, "w"):
pass
# Open for reading and try to write.
with pytest.raises(IOError):
with TensorFile(filename, "r") as f:
f.write(np.zeros(10))
def keras_classification_model(num_samples=1000, nepochs=5, batch_size=10):
    """Create and train a dummy Keras classification model."""
# Define model.
inputs = keras.layers.Input((1,))
x = keras.layers.Dense(1, activation="linear")(inputs)
outputs = keras.layers.Dense(2, activation="softmax")(x)
m = keras.models.Model(inputs, outputs)
# Compile model.
optimizer = keras.optimizers.Adam(lr=0.01)
m.compile(loss="mse", optimizer=optimizer, metrics=["accuracy"])
# Generate training data.
x_train = np.linspace(0, 1, num=num_samples)
np.random.shuffle(x_train)
y_train = np.zeros((num_samples, 2))
y_train[x_train > 0.5, 1] = 1
y_train[x_train <= 0.5, 0] = 1
# Train model and verify accuracy.
res = m.fit(x_train, y_train, batch_size=batch_size, epochs=nepochs)
logger.info("Keras Model average accuracy: %.3f", res.history["acc"][-1])
assert res.history["acc"][-1] > 0.95
n_batches = num_samples // batch_size
x_batches = [
x_train[i * batch_size : (i + 1) * batch_size] for i in range(n_batches)
]
y_batches = [
y_train[i * batch_size : (i + 1) * batch_size] for i in range(n_batches)
]
return n_batches, batch_size, x_batches, y_batches, m
@pytest.fixture(scope="function")
def classification_model(num_samples=1000, nepochs=5, batch_size=10):
    """Test fixture that builds a TensorRT engine from a trained Keras classification model."""
n_batches, batch_size, x_batches, y_batches, m = keras_classification_model()
MAX_WORKSPACE = 1 << 28
MAX_BATCHSIZE = 16
_, out_tensor_name, engine = keras_to_tensorrt(
m,
input_dims=(1, 1, 1),
max_workspace_size=MAX_WORKSPACE,
max_batch_size=MAX_BATCHSIZE,
)
return n_batches, batch_size, x_batches, y_batches, out_tensor_name, engine
class TestTensorRTInference(object):
"""Test TensorRT inference."""
def test_classify_iterator(self, tmpdir, classification_model):
"""Test TRT classification on trained model using iterator API."""
assert cuda is not None, "CUDA not imported."
assert trt is not None, "TRT not imported."
n_batches, batch_size, x_batches, y_batches, out_tensor_name, engine = (
classification_model
)
# Verify accuracy of TensorRT engine using the iterator API.
acc_accuracy = 0
for i, output in enumerate(engine.infer_iterator(x_batches)):
labels = np.argmax(y_batches[i], axis=1)
predictions = np.argmax(output[out_tensor_name].squeeze(), axis=1)
accuracy = np.sum(predictions == labels) / float(batch_size)
acc_accuracy += accuracy
avg_accuracy = acc_accuracy / n_batches
logger.info(
"TensorRT Model average accuracy (iterator API): %.3f", avg_accuracy
)
assert avg_accuracy > 0.95
# Very loose check that the reported forward time is sensible.
forward_time = engine.get_forward_time()
assert 0 < forward_time < 1
def test_classify(self, tmpdir, classification_model):
"""Test TRT classification on trained model using single batch inference API."""
n_batches, batch_size, x_batches, y_batches, out_tensor_name, engine = (
classification_model
)
# Verify accuracy using the single array inference API.
acc_accuracy = 0
for i in range(n_batches):
output = engine.infer(x_batches[i])
labels = np.argmax(y_batches[i], axis=1)
predictions = np.argmax(output[out_tensor_name].squeeze(), axis=1)
accuracy = np.sum(predictions == labels) / float(batch_size)
acc_accuracy += accuracy
avg_accuracy = acc_accuracy / n_batches
logger.info("TensorRT Model average accuracy: %.3f", avg_accuracy)
assert avg_accuracy > 0.95
def test_serialize_classify(self, tmpdir, classification_model):
"""Test TRT classification after serializing and reloading from file."""
n_batches, batch_size, x_batches, y_batches, out_tensor_name, engine = (
classification_model
)
# Serialize TensorRT engine to a file.
trt_filename = os.path.join(str(tmpdir), "model.trt")
engine.save(trt_filename)
# Delete the engine, load the engine again from its serialized
# representation and verify accuracy.
del engine
engine = nvidia_tao_tf1.core.export.load_tensorrt_engine(trt_filename)
acc_accuracy = 0
for i, output in enumerate(engine.infer_iterator(x_batches)):
labels = np.argmax(y_batches[i], axis=1)
predictions = np.argmax(output[out_tensor_name].squeeze(), axis=1)
accuracy = np.sum(predictions == labels) / float(batch_size)
acc_accuracy += accuracy
avg_accuracy = acc_accuracy / n_batches
logger.info("TensorRT Model average accuracy: %.3f", avg_accuracy)
assert avg_accuracy > 0.95
logger.info("Serialized TensorRT Model average accuracy: %.3f", avg_accuracy)
def test_classification_int8(self, tmpdir):
"""Test TRT classification in reduce precision."""
third_party.keras.tensorflow_backend.limit_tensorflow_GPU_mem(gpu_fraction=0.9)
n_batches, batch_size, x_batches, y_batches, m = keras_classification_model()
# Serialize input batches to file.
tensor_filename = os.path.join(str(tmpdir), "tensor.dump")
with TensorFile(tensor_filename, "w") as f:
for x_batch in x_batches:
f.write(x_batch)
try:
cal_cache_filename = os.path.join(str(tmpdir), "cal.bin")
_, out_tensor_name, engine = keras_to_tensorrt(
m,
input_dims=(1, 1, 1),
dtype="int8",
calibration_data_filename=tensor_filename,
calibration_cache_filename=cal_cache_filename,
calibration_n_batches=n_batches,
calibration_batch_size=batch_size,
)
except AttributeError as e:
logger.warning(str(e))
pytest.skip(str(e))
# Verify accuracy of TensorRT engine using the iterator API.
acc_accuracy = 0
for i, output in enumerate(engine.infer_iterator(x_batches)):
labels = np.argmax(y_batches[i], axis=1)
predictions = np.argmax(output[out_tensor_name].squeeze(), axis=1)
accuracy = np.sum(predictions == labels) / float(batch_size)
acc_accuracy += accuracy
avg_accuracy = acc_accuracy / n_batches
logger.info(
"TensorRT INT8 Model average accuracy (iterator API): %.3f", avg_accuracy
)
assert avg_accuracy > 0.95
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"batch_size, parser",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), 2, "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), 2, "caffe"),
# TODO(MOD-435)
],
)
def test_net_output(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
batch_size,
parser,
):
"""Create a model with random weights and match Keras and TensorRT output."""
if parser == "onnx" and not _onnx_supported:
return
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"input_shape," "batch_size, concat_axis, parser",
[((3, 48, 16), 2, 1, "caffe"), ((3, 64, 32), 4, 2, "caffe")],
)
def test_concat(self, tmpdir, input_shape, batch_size, concat_axis, parser):
"""Create a model with branches and a concat layer and match Keras and TensorRT output."""
if concat_axis != 1:
pytest.skip("TensorRT does not support concatenation on axis!=1.")
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
inputs_relu = keras.layers.Activation("relu")(inputs)
inputs_sigmoid = keras.layers.Activation("sigmoid")(inputs)
inputs_softmax_with_axis = keras.layers.Softmax(axis=1)(inputs)
net_output = keras.layers.Concatenate(axis=concat_axis)(
[inputs_relu, inputs_sigmoid, inputs_softmax_with_axis]
)
keras_model = keras.models.Model(inputs=inputs, outputs=net_output)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"model, nlayers, input_shape, batch_size, use_batch_norm, data_format,"
"padding, kernel_size, strides, parser",
[
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"valid",
[1, 1],
(1, 1),
"caffe",
),
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"valid",
[1, 1],
(1, 1),
"uff",
),
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"same",
[1, 1],
(1, 1),
"caffe",
),
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"same",
[1, 1],
(1, 1),
"uff",
),
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"valid",
[3, 3],
(1, 1),
"caffe",
),
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"valid",
[3, 3],
(1, 1),
"uff",
),
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"same",
[3, 3],
(1, 1),
"caffe",
),
(
HelNet,
6,
(3, 96, 64),
2,
True,
"channels_first",
"same",
[3, 3],
(1, 1),
"uff",
),
# TODO(MOD-435)
],
)
def test_conv2dtranspose_layers(
self,
tmpdir,
model,
nlayers,
input_shape,
batch_size,
use_batch_norm,
data_format,
padding,
kernel_size,
strides,
parser,
):
"""
Test that models with Conv2DTranspose layers convert to TensorRT correctly.
Includes tests where kernel_size and strides are not equal, for both same and valid padding.
"""
if parser == "onnx" and not _onnx_supported:
return
keras.backend.clear_session()
third_party.keras.tensorflow_backend.limit_tensorflow_GPU_mem(gpu_fraction=0.9)
with tf.device("cpu:0"):
nvidia_tao_tf1.core.utils.set_random_seed(1)
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
x = keras_model.outputs[0]
# Add Conv2DTranspose layer.
            # Commented out below to work around a bug in TRT 7.0/7.1.
# see: https://nvbugswb.nvidia.com/NvBugs5/SWBug.aspx?bugid=200603939&cmtNo=
# num_filters = 4
# x = keras.layers.Conv2DTranspose(
# filters=num_filters,
# kernel_size=kernel_size,
# strides=strides,
# padding=padding,
# activation="relu",
# name="conv2DTranspose",
# )(x)
keras_model = keras.models.Model(inputs=inputs, outputs=x)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"input_shape, batch_size, parser, use_gru",
[
((3, 4, 6), 2, "caffe", False),
((3, 6, 4), 4, "uff", False),
((3, 6, 6), 1, "uff", True),
((3, 6, 4), 4, "onnx", False),
],
)
def test_multiple_outputs(self, tmpdir, input_shape, batch_size, parser, use_gru):
"""Create a model with multiple outputs and match and TensorRT output."""
if parser == "onnx" and not _onnx_supported:
return
tf.reset_default_graph()
keras.backend.clear_session()
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
if parser == 'onnx':
inputs = keras.layers.Input(
name="input_1",
batch_shape=(batch_size,) + input_shape)
else:
inputs = keras.layers.Input(name="input_1", shape=input_shape)
conv = keras.layers.Conv2D(16, (3, 3))(inputs)
relu = keras.layers.Activation("relu", name="relu0")(conv)
sigmoid = keras.layers.Activation("sigmoid", name="sigmoid0")(conv)
keras_model = keras.models.Model(inputs=inputs, outputs=[relu, sigmoid])
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_outputs = keras_model.predict(data)
if use_gru:
assert (
parser == "uff"
), "Only UFF parser is supported for exporting GRU models."
                # An RNN/GRU TRT model needs an extra input: the state at the
                # last time step. This is needed as TRT cannot handle state internally.
# State input shape that is aligned with the input shapes.
state_input_shape = tuple(relu.shape.as_list()[1:])
state_data_shape = (batch_size,) + state_input_shape
state_data = np.random.random_sample(state_data_shape)
# Create an export compatible convolutional GRU layer.
convgru2d_export_layer = ConvGRU2DExport(
model_sequence_length_in_frames=1,
input_sequence_length_in_frames=1,
state_scaling=0.9,
input_shape=relu.shape.as_list(),
initial_state_shape=[None] + list(state_input_shape),
spatial_kernel_height=1,
spatial_kernel_width=1,
kernel_regularizer=None,
bias_regularizer=None,
)
gru_output = convgru2d_export_layer(relu)
keras_model = keras.models.Model(
inputs=inputs, outputs=[relu, sigmoid, gru_output]
)
                # Test the output of the end-to-end keras model.
                # A session is used instead of keras.predict since a feed_dict is needed.
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
keras_gru_output = sess.run(
gru_output,
feed_dict={
convgru2d_export_layer._initial_state: state_data,
inputs: data,
},
)
# Add the state input for GRU input to input data and dimensions.
input_dims = {
"input_1": input_shape,
"conv_gru_2d_export/state_placeholder": state_input_shape,
}
input_data = {
"input_1": data,
"conv_gru_2d_export/state_placeholder": state_data,
}
keras_outputs.append(keras_gru_output)
else:
                # Single input (no state input).
input_dims = input_shape
input_data = data
output_layer_names = ["relu0", "sigmoid0"]
output_layers = [keras_model.get_layer(name) for name in output_layer_names]
if parser == "uff":
# Provide TensorFlow tensor names.
output_node_names = [
l.get_output_at(0).name.split(":")[0] for l in output_layers
]
elif parser == "onnx":
output_node_names = [l.get_output_at(0).name.split("/")[0] for l in output_layers]
elif parser == "caffe":
output_node_names = []
for l in output_layers:
if hasattr(l, "activation") and l.activation.__name__ != "linear":
output_node_names.append(
"%s/%s" % (l.name, l.activation.__name__.capitalize())
)
else:
output_node_names.append(l.name)
else:
raise ValueError("Unknown parser: %s" % parser)
# Pass ConvGRU2DExport as the custom object to the exporter if gru is being used.
custom_objects = {"ConvGRU2DExport": ConvGRU2DExport} if use_gru else None
if use_gru:
output_node_names += ["conv_gru_2d_export/state_output"]
_, _, engine = keras_to_tensorrt(
keras_model,
input_dims=input_dims,
max_batch_size=batch_size,
parser=parser,
output_node_names=output_node_names,
custom_objects=custom_objects,
)
inferred_data = engine.infer(input_data)
print("INFER: {}".format(list(inferred_data.keys())))
tensorrt_outputs = [inferred_data[name] for name in output_node_names]
# Check that keras and TRT model outputs are aligned and match correspondingly.
assert len(keras_outputs) == len(tensorrt_outputs)
assert all(
[a.shape == b.shape for (a, b) in zip(keras_outputs, tensorrt_outputs)]
)
assert all(
[
np.allclose(a, b, atol=1e-2)
for (a, b) in zip(keras_outputs, tensorrt_outputs)
]
)
@pytest.mark.parametrize(
"batch_size, parser, use_gru",
[(2, "caffe", False), (4, "uff", False), (1, "uff", True), (4, "onnx", False)],
)
def test_multiple_inputs(self, tmpdir, batch_size, parser, use_gru):
"""
Create a model with multiple inputs and match and TensorRT output.
Also create caffe model and make sure input layer names match keras inputs.
"""
if parser == "onnx" and not _onnx_supported:
return
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
# Creating a model that takes two inputs where the first input
# is twice as large as the second input in each dimension.
# The first input goes through a conv layer with stride 2.
# The second input goes through a conv layer with stride 1.
# Outputs of the conv layers are then concatenated before going through
# a final convolution.
if parser == 'onnx':
input_0_shape = (batch_size, 3, 32, 64)
input_1_shape = (batch_size, 3, 16, 32)
input_0 = keras.layers.Input(batch_shape=input_0_shape, name="input_0")
input_1 = keras.layers.Input(batch_shape=input_1_shape, name="input_1")
else:
input_0_shape = (3, 32, 64)
input_1_shape = (3, 16, 32)
input_0 = keras.layers.Input(shape=input_0_shape, name="input_0")
input_1 = keras.layers.Input(shape=input_1_shape, name="input_1")
conv_0 = keras.layers.Conv2D(16, (1, 1), strides=(2, 2))(input_0)
conv_1 = keras.layers.Conv2D(16, (1, 1), strides=(1, 1))(input_1)
# add = keras.layers.Add()([conv_0, conv_1])
merged = keras.layers.Concatenate(axis=1, name="concat")([conv_0, conv_1])
conv = keras.layers.Conv2D(8, (3, 3))(merged)
net_output = keras.layers.Activation("relu", name="relu0")(conv)
keras_model = keras.models.Model(
inputs=[input_0, input_1], outputs=net_output
)
if parser != 'onnx':
data_0_shape = (batch_size,) + input_0_shape
data_1_shape = (batch_size,) + input_1_shape
else:
data_0_shape = input_0_shape
data_1_shape = input_1_shape
data_0 = np.random.random_sample(data_0_shape)
data_1 = np.random.random_sample(data_1_shape)
if use_gru:
assert (
parser == "uff"
), "Only UFF parser is supported for exporting GRU models."
                # An RNN/GRU TRT model needs the state at the last time step
                # as an extra input, as TRT cannot handle state internally.
# State input shape that is aligned with the input shapes.
state_input_shape = (8, 14, 30)
state_data_shape = (batch_size,) + state_input_shape
state_data = np.random.random_sample(state_data_shape)
# Create an export compatible convolutional GRU layer.
convgru2d_export_layer = ConvGRU2DExport(
model_sequence_length_in_frames=1,
input_sequence_length_in_frames=1,
state_scaling=0.9,
input_shape=net_output.shape.as_list(),
initial_state_shape=[None] + list(state_input_shape),
spatial_kernel_height=1,
spatial_kernel_width=1,
kernel_regularizer=None,
bias_regularizer=None,
)
gru_output = convgru2d_export_layer(net_output)
keras_model = keras.models.Model(
inputs=[input_0, input_1], outputs=gru_output
)
                # Test the output of the end-to-end keras model.
                # A session is used instead of keras.predict since a feed_dict is needed.
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
keras_output = sess.run(
gru_output,
feed_dict={
convgru2d_export_layer._initial_state: state_data,
input_0: data_0,
input_1: data_1,
},
)
# Add the state input for GRU input to input data and dimensions.
input_dims = {
"input_0": input_0_shape,
"input_1": input_1_shape,
"conv_gru_2d_export/state_placeholder": state_input_shape,
}
input_data = {
"input_0": data_0,
"input_1": data_1,
"conv_gru_2d_export/state_placeholder": state_data,
}
else:
keras_output = keras_model.predict([data_0, data_1])
input_dims = {"input_0": input_0_shape, "input_1": input_1_shape}
input_data = {"input_0": data_0, "input_1": data_1}
# If the parser is caffe, check that caffe and keras input layer names match.
if parser == "caffe":
caffe_in_names, _ = nvidia_tao_tf1.core.export.keras_to_caffe(
keras_model,
os.path.join(str(tmpdir) + "/model.prototxt"),
os.path.join(str(tmpdir) + "/model.caffemodel"),
)
assert all(
[caffe_name in input_dims.keys() for caffe_name in caffe_in_names]
)
custom_objects = {"ConvGRU2DExport": ConvGRU2DExport} if use_gru else None
# Check that keras and TRT model outputs match after inference.
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_dims,
max_batch_size=batch_size,
parser=parser,
custom_objects=custom_objects,
)
inferred_data = engine.infer(input_data)
tensorrt_output = inferred_data[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
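    # A minimal sketch (illustrative only) of the multi-input convention exercised
    # above: both input_dims and the inference feed are dictionaries keyed by
    # input tensor name, e.g.
    #   input_dims = {"input_0": (3, 32, 64), "input_1": (3, 16, 32)}
    #   inferred = engine.infer({"input_0": data_0, "input_1": data_1})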
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape, "
"batch_size, parser",
[(HelNet, 6, "channels_first", True, (3, 128, 256), 1, "uff")],
)
def test_net_output_with_gru(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
batch_size,
parser,
):
"""
Create a model with random weights and match Keras and TensorRT output.
The model includes a GRU layer and hence has an external state input."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape, name="input_1")
feature_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
# Get the feature outputs (Input to the GRU).
feature_out = feature_model.outputs[0]
feature_out_shape = feature_out.shape.as_list()
gru_input_shape = tuple(feature_out_shape[1:])
# Shape and random input data for the GRU/RNN state.
state_data_shape = (batch_size,) + tuple(feature_out.shape.as_list()[1:])
state_data = np.float32(np.random.random_sample(state_data_shape))
            # Shape and random input data for the non-state input.
data_shape = (batch_size,) + input_shape
data = np.float32(np.random.random_sample(data_shape))
            # Add the convolutional GRU export layer.
convgru2d_export_layer = ConvGRU2DExport(
model_sequence_length_in_frames=1,
input_sequence_length_in_frames=1,
state_scaling=0.9,
input_shape=feature_out_shape,
initial_state_shape=feature_out_shape,
spatial_kernel_height=1,
spatial_kernel_width=1,
kernel_regularizer=None,
bias_regularizer=None,
is_stateful=True,
)
net_output = convgru2d_export_layer(feature_out)
# Construct end-to-end keras model.
keras_model = keras.models.Model(inputs=inputs, outputs=net_output)
        # Test the output of the end-to-end Keras model.
# A session is used instead of keras.predict since a feed_dict is needed.
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
keras_output_value = sess.run(
net_output,
feed_dict={
convgru2d_export_layer._initial_state: state_data,
inputs: data,
},
)
# Input dims is now a dictionary, just as in the case of multiple inputs.
input_dims = {
"input_1": input_shape,
"conv_gru_2d_export/state_placeholder": gru_input_shape,
}
print(f"Input dimensions: {input_dims}")
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_dims,
max_batch_size=batch_size,
parser=parser,
custom_objects={"ConvGRU2DExport": ConvGRU2DExport},
)
# Input data for TRT inference.
input_data = {
"conv_gru_2d_export/state_placeholder": state_data,
"input_1": data,
}
tensorrt_output = engine.infer(input_data)[out_tensor_name]
assert keras_output_value.shape == tensorrt_output.shape
assert np.allclose(keras_output_value, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"batch_size, dropout_type, parser",
[
(HelNet, 6, "channels_first", True, (3, 128, 256), 2, "dropout", "uff"),
(HelNet, 6, "channels_first", True, (3, 128, 256), 2, "dropout", "caffe"),
(
HelNet,
6,
"channels_first",
True,
(3, 128, 256),
2,
"spatial_dropout_2d",
"uff",
),
(
HelNet,
6,
"channels_first",
True,
(3, 128, 256),
2,
"spatial_dropout_2d",
"caffe",
),
],
)
def test_dropout(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
batch_size,
dropout_type,
parser,
):
"""Test the models with dropout convert to TensorRT correctly."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
x = keras_model.outputs[0]
assert dropout_type in ["dropout", "spatial_dropout_2d"]
if dropout_type == "dropout":
x = keras.layers.Dropout(rate=0.5)(x)
elif dropout_type == "spatial_dropout_2d":
x = keras.layers.SpatialDropout2D(rate=0.5)(x)
x = keras.layers.Conv2D(32, (3, 3), name="conv2d_output", padding="same")(x)
keras_model = keras.models.Model(inputs=inputs, outputs=x)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.skipif(
os.getenv("RUN_ON_CI", "0") == "1",
reason="Cannot be run on CI"
)
@pytest.mark.parametrize(
"input_shape, batch_size, parser",
[((3, 15, 30), 2, "caffe"), ((3, 15, 30), 2, "uff")],
)
def test_dense_dropout(self, tmpdir, input_shape, batch_size, parser):
"""Test the models with dropout after a dense layer convert to TensorRT correctly."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Flatten()(inputs)
x = keras.layers.Dense(128, activation="tanh")(x)
x = keras.layers.Dropout(rate=0.5)(x)
x = keras.layers.Dense(128, activation="tanh")(x)
keras_model = keras.models.Model(inputs=inputs, outputs=x)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name].squeeze()
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"padding, batch_size, parser",
[
(HelNet, 6, "channels_first", True, (3, 96, 64), "valid", 2, "uff"),
(HelNet, 6, "channels_first", True, (3, 160, 64), "valid", 5, "caffe"),
],
)
def test_eltwise_op_layers(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
padding,
batch_size,
parser,
):
"""Test the models with max pooling convert to TensorRT correctly."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
x = keras_model.outputs[0]
# Branches for distinct add, subtract, and multiply layers.
# First branch.
num_filters = 4
x1 = keras.layers.Conv2D(
filters=num_filters,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="sigmoid",
name="conv2d_x1",
)(x)
# Second branch.
x2 = keras.layers.Conv2D(
filters=num_filters,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="relu",
name="conv2d_x2",
)(x)
# Third branch.
x3 = keras.layers.Conv2D(
filters=num_filters,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="linear",
name="conv2d_x3",
)(x)
# Fourth branch.
x4 = keras.layers.Conv2D(
filters=num_filters,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="tanh",
name="conv2d_x4",
)(x)
# Add, Subtract, and Multiply
x = keras.layers.Add()([x1, x2])
x = keras.layers.Subtract()([x, x3])
x = keras.layers.Multiply()([x, x4])
            # Workaround for a bug in TRT 7.0: also expose the branches as output nodes.
            # See https://nvbugswb.nvidia.com/NvBugs5/SWBug.aspx?bugid=200602766&cmtNo=
            # This bug is fixed in TRT 7.1, so remove this trick once we upgrade to TRT 7.1.
keras_model = keras.models.Model(inputs=inputs, outputs=[x, x1, x2, x3, x4])
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name[0]]
assert keras_output[0].shape == tensorrt_output.shape
assert np.allclose(keras_output[0], tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"batch_size, parser",
[
(HelNet, 6, "channels_first", True, (3, 96, 64), 4, "uff"),
(HelNet, 6, "channels_first", True, (3, 160, 64), 2, "caffe"),
],
)
def test_eltwise_op_with_broadcast(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
batch_size,
parser,
):
"""Test the models with broadcast element-wise op converts to TensorRT."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
x = keras_model.outputs[0]
# Project to one channel.
x_single_channel = keras.layers.Conv2D(
filters=1,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="sigmoid",
)(x)
# Branches for distinct add, subtract, and multiply layers.
# First branch.
num_filters = 4
x1 = keras.layers.Conv2D(
filters=num_filters,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="sigmoid",
name="conv2d_x1",
)(x)
x1 = keras.layers.Add()([x1, x_single_channel])
# Second branch.
x2 = keras.layers.Conv2D(
filters=num_filters,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="relu",
name="conv2d_x2",
)(x)
x2 = keras.layers.Subtract()([x2, x_single_channel])
# Third branch.
x3 = keras.layers.Conv2D(
filters=num_filters,
kernel_size=[1, 1],
strides=(1, 1),
padding="same",
activation="linear",
name="conv2d_x3",
)(x)
x3 = keras.layers.Multiply()([x3, x_single_channel])
# Add them all together
x = keras.layers.Add()([x1, x2])
x = keras.layers.Add()([x, x3])
keras_model = keras.models.Model(inputs=inputs, outputs=x)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"data_format, input_shape, num_outbound_nodes_after_pad2d,"
"conv2d_pad_type, conv1_strides, conv2_strides, zeropad2d_padding,"
" batch_size, parser",
[
(
"channels_first",
(3, 96, 64),
0,
"valid",
(1, 1),
(1, 1),
(2, 2),
2,
"caffe",
),
(
"channels_first",
(3, 32, 32),
1,
"same",
(2, 2),
(2, 2),
(1, 1),
2,
"caffe",
),
(
"channels_first",
(3, 96, 64),
2,
"valid",
(2, 2),
(2, 2),
(1, 1),
2,
"caffe",
),
(
"channels_first",
(3, 48, 48),
0,
"same",
(1, 1),
(1, 1),
(2, 2),
2,
"uff",
),
(
"channels_first",
(3, 128, 64),
1,
"valid",
(2, 2),
(2, 2),
(1, 1),
2,
"uff",
),
(
"channels_first",
(3, 64, 128),
2,
"same",
(2, 2),
(2, 2),
(1, 1),
2,
"uff",
),
(
"channels_first",
(3, 96, 64),
0,
"valid",
(2, 2),
(1, 1),
(2, 2),
2,
"caffe",
),
(
"channels_first",
(3, 32, 32),
1,
"same",
(1, 1),
(2, 2),
(1, 1),
2,
"caffe",
),
(
"channels_first",
(3, 96, 64),
2,
"valid",
(1, 1),
(2, 2),
(1, 1),
2,
"caffe",
),
(
"channels_first",
(3, 48, 48),
0,
"same",
(2, 2),
(1, 1),
(2, 2),
2,
"uff",
),
(
"channels_first",
(3, 128, 64),
1,
"valid",
(1, 1),
(2, 2),
(1, 1),
2,
"uff",
),
(
"channels_first",
(3, 64, 128),
2,
"same",
(1, 1),
(2, 2),
(1, 1),
2,
"uff",
),
],
)
def test_zeropad2d_after_conv2d(
self,
tmpdir,
data_format,
input_shape,
num_outbound_nodes_after_pad2d,
conv2d_pad_type,
conv1_strides,
conv2_strides,
zeropad2d_padding,
batch_size,
parser,
):
"""Test the models with ZeroPadding2D after conv2d."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Conv2D(32, (3, 3), padding="same", strides=conv1_strides)(
inputs
)
x = keras.layers.convolutional.ZeroPadding2D(
padding=zeropad2d_padding, data_format=data_format
)(x)
x = keras.layers.Conv2D(
32, (3, 3), padding=conv2d_pad_type, strides=conv2_strides
)(x)
if num_outbound_nodes_after_pad2d == 1:
x = keras.layers.Activation("relu")(x)
elif num_outbound_nodes_after_pad2d == 2:
x1 = keras.layers.Activation("relu")(x)
x2 = keras.layers.Activation("relu")(x)
x = keras.layers.Add()([x1, x2])
elif num_outbound_nodes_after_pad2d != 0:
raise ValueError(
"Unhandled num_outbound_nodes_after_pad2d: %d"
% num_outbound_nodes_after_pad2d
)
keras_model = keras.models.Model(inputs=inputs, outputs=x)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"model, nlayers, data_format, use_batch_norm, input_shape,"
"padding, pooling_type, batch_size, parser",
[
(HelNet, 6, "channels_first", True, (3, 96, 64), "valid", "AVE", 2, "uff"),
(HelNet, 6, "channels_first", True, (3, 64, 96), "same", "AVE", 3, "uff"),
(
HelNet,
6,
"channels_first",
True,
(3, 128, 128),
"same",
"AVE",
4,
"caffe",
),
(
HelNet,
6,
"channels_first",
True,
(3, 160, 64),
"valid",
"AVE",
5,
"caffe",
),
(HelNet, 6, "channels_first", True, (3, 96, 64), "valid", "MAX", 2, "uff"),
(HelNet, 6, "channels_first", True, (3, 64, 96), "same", "MAX", 3, "uff"),
(
HelNet,
6,
"channels_first",
True,
(3, 128, 128),
"same",
"MAX",
4,
"caffe",
),
(
HelNet,
6,
"channels_first",
True,
(3, 160, 64),
"valid",
"MAX",
5,
"caffe",
),
],
)
def test_pooling(
self,
tmpdir,
model,
nlayers,
data_format,
use_batch_norm,
input_shape,
padding,
pooling_type,
batch_size,
parser,
):
"""Test the models with average and max pooling convert to TensorRT correctly."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
x = keras_model.outputs[0]
assert pooling_type in ["AVE", "MAX"]
if pooling_type == "AVE":
x = keras.layers.AveragePooling2D(pool_size=(2, 2), padding=padding)(x)
elif pooling_type == "MAX":
x = keras.layers.MaxPooling2D(pool_size=(2, 2), padding=padding)(x)
x = keras.layers.Conv2D(32, (3, 3), name="conv2d_output", padding="same")(x)
keras_model = keras.models.Model(inputs=inputs, outputs=x)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize("model", [HelNet])
@pytest.mark.parametrize("nlayers", [6])
    # NOTE: input_shape (3, 64, 64) currently fails, likely due to some error while parsing the
    # UFF or Caffe model back into TRT, as TensorFlow, TRT, and Keras **all** have outputs that
    # match before export.
@pytest.mark.parametrize("input_shape", [(3, 96, 96)])
@pytest.mark.parametrize("batch_size", [2])
@pytest.mark.parametrize("use_batch_norm", [True, False])
@pytest.mark.parametrize("data_format", ["channels_first"])
@pytest.mark.parametrize("parser", ["caffe", "uff"])
@pytest.mark.parametrize(
"kernel_size,dilation_rate",
[((1, 1), (1, 1)), ((3, 3), (1, 1)), ((3, 3), (2, 2))],
)
def test_conv2d_dilation_layers(
self,
tmpdir,
model,
nlayers,
input_shape,
batch_size,
use_batch_norm,
data_format,
kernel_size,
parser,
dilation_rate,
):
"""
Test that models with Conv2D layers with dilation convert to TensorRT correctly.
"""
keras.backend.clear_session()
third_party.keras.tensorflow_backend.limit_tensorflow_GPU_mem(gpu_fraction=0.9)
with tf.device("cpu:0"):
nvidia_tao_tf1.core.utils.set_random_seed(1)
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(
nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
)
x = keras_model.outputs[0]
# Add Conv2D layer with dilation.
num_filters = 4
y = keras.layers.Conv2D(
filters=num_filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
data_format=data_format,
activation="relu",
padding="same",
name="conv2D_dilated",
)(x)
keras_model = keras.models.Model(inputs=inputs, outputs=y)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
# @pytest.mark.parametrize(
# "model, nlayers, data_format, use_batch_norm, input_shape,"
# "padding, pooling_type, batch_size, parser",
# [
# (HelNet, 6, "channels_first", True, (3, 96, 64), "valid", "AVE", 2, "uff"),
# (
# HelNet,
# 6,
# "channels_first",
# True,
# (3, 128, 128),
# "same",
# "AVE",
# 4,
# "caffe",
# ),
# ],
# )
# @pytest.mark.script_launch_mode("subprocess")
# def test_app(
# self,
# script_runner,
# tmpdir,
# model,
# nlayers,
# data_format,
# use_batch_norm,
# input_shape,
# padding,
# pooling_type,
# batch_size,
# parser,
# ):
# """Test TRT export from app."""
# with tf.device("cpu:0"):
# # Creating graph on CPU to leave GPU memory to TensorRT.
# keras_filename = os.path.join(str(tmpdir), "model.h5")
# trt_filename = os.path.join(str(tmpdir), "model.trt")
# inputs = keras.layers.Input(shape=input_shape)
# m = model(
# nlayers, inputs, use_batch_norm=use_batch_norm, data_format=data_format
# )
# m.save(keras_filename)
# extra_args = []
# extra_args.extend(["--input_dims", ",".join([str(i) for i in input_shape])])
# extra_args.extend(["--parser", parser])
# extra_args.extend(["--format", "tensorrt"])
# extra_args.extend(["--output_file", trt_filename])
# extra_args.extend(["--random_data"])
# extra_args.extend(["-v"])
# env = os.environ.copy()
# env["CUDA_VISIBLE_DEVICES"] = "0"
# script = "app.py"
# # Path adjustment for bazel tests
# if os.path.exists(os.path.join("nvidia_tao_tf1/core/export", script)):
# script = os.path.join("nvidia_tao_tf1/core/export", script)
# ret = script_runner.run(script, keras_filename, env=env, *extra_args)
# assert ret.success, "Process returned error: %s error trace: %s" % (
# ret.success,
# ret.stderr,
# )
# assert os.path.isfile(trt_filename)
# assert "Elapsed time" in ret.stderr, "Read: %s" % ret.stderr
@pytest.mark.parametrize(
"model, nlayers, input_shape, batch_size, parser",
[(HelNet, 6, (3, 96, 64), 2, "uff"), (HelNet, 6, (3, 128, 128), 4, "caffe")],
)
def test_3d_softmax(self, tmpdir, model, nlayers, input_shape, batch_size, parser):
"""Test the models with average and max pooling convert to TensorRT correctly."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
keras_model = model(nlayers, inputs, data_format="channels_first")
x = keras_model.outputs[0]
            # Workaround for a bug in TRT 7.0/7.1: the softmax below is disabled for now.
            # See https://nvbugswb.nvidia.com/NvBugs5/SWBug.aspx?bugid=200603619&cmtNo=
            # Remove this trick once the bug is fixed in a future version of TRT.
# x = keras.layers.Softmax(axis=1)(x)
keras_model = keras.models.Model(inputs=inputs, outputs=x)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = keras_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
keras_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name]
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize(
"fp32_layer_names, fp16_layer_names, \
expected_fp32_layer_names, expected_fp16_layer_names",
[
(
[],
["softmax_1"],
[],
["softmax_1/transpose", "softmax_1/Softmax", "softmax_1/transpose_1"],
),
(
["softmax_1"],
[],
["softmax_1/transpose", "softmax_1/Softmax", "softmax_1/transpose_1"],
[],
),
],
)
def test_mixed_precision(
self,
tmpdir,
fp32_layer_names,
fp16_layer_names,
expected_fp32_layer_names,
expected_fp16_layer_names,
):
"""Test INT8 based mixed precision inference."""
input_shape = (3, 96, 64)
layer_num = 6
batch_size = 16
nbatches = 1
# Define the model.
inputs = keras.layers.Input(shape=input_shape)
keras_model = HelNet(layer_num, inputs, data_format="channels_first")
x = keras_model.outputs[0]
x = keras.layers.Softmax(axis=1)(x)
keras_model = keras.models.Model(inputs=inputs, outputs=x)
# Prepare calibration data and save to a file.
tensor_filename = os.path.join(str(tmpdir), "tensor.dump")
with TensorFile(tensor_filename, "w") as f:
cali_data = np.random.randn(
batch_size, input_shape[0], input_shape[1], input_shape[2]
)
f.write(cali_data)
# Prepare a dummy calibration table file.
cal_cache_filename = os.path.join(str(tmpdir), "cal.bin")
# Export TensorRT mixed precision model, and check the correctness.
try:
with mock.patch(
"nvidia_tao_tf1.core.export._tensorrt._set_layer_precision",
side_effect=_set_excluded_layer_precision,
) as spied_set_excluded_layer_precision:
_, _, _ = keras_to_tensorrt(
keras_model,
dtype="int8",
input_dims=input_shape,
max_batch_size=2,
calibration_data_filename=tensor_filename,
calibration_cache_filename=cal_cache_filename,
calibration_n_batches=nbatches,
calibration_batch_size=batch_size,
fp32_layer_names=fp32_layer_names,
fp16_layer_names=fp16_layer_names,
parser="uff",
)
arg_fp32_layer_names = spied_set_excluded_layer_precision.call_args[1][
"fp32_layer_names"
]
arg_fp16_layer_names = spied_set_excluded_layer_precision.call_args[1][
"fp16_layer_names"
]
arg_network = spied_set_excluded_layer_precision.call_args[1]["network"]
res_fp32_layer_names = []
res_fp16_layer_names = []
for layer in arg_network:
if layer.precision == trt.float32:
res_fp32_layer_names.append(layer.name)
elif layer.precision == trt.float16:
res_fp16_layer_names.append(layer.name)
assert arg_fp32_layer_names == fp32_layer_names
assert arg_fp16_layer_names == fp16_layer_names
assert res_fp32_layer_names == expected_fp32_layer_names
assert res_fp16_layer_names == expected_fp16_layer_names
except AttributeError as e:
logger.warning(str(e))
pytest.skip(str(e))
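    # A minimal sketch (illustrative only, assuming the standard TensorRT Python
    # API) of how a layer's precision can be pinned, which is roughly what a
    # helper like _set_layer_precision needs to do for the excluded layers:
    #   for layer in network:
    #       if layer.name in fp32_layer_names:
    #           layer.precision = trt.float32
    #           for i in range(layer.num_outputs):
    #               layer.set_output_type(i, trt.float32)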
@pytest.mark.parametrize(
"input_shape, batch_size, parser", [((3, 15, 30), 2, "uff")]
)
def test_model_in_model(self, tmpdir, input_shape, batch_size, parser):
"""Test the model-in-model converts to TensorRT correctly."""
with tf.device("cpu:0"):
# Creating graph on CPU to leave GPU memory to TensorRT.
inputs = keras.layers.Input(shape=input_shape)
x = keras.layers.Flatten()(inputs)
x = keras.layers.Dense(128, activation="tanh")(x)
inner_model = keras.models.Model(inputs=inputs, outputs=x)
outer_inputs = keras.layers.Input(shape=input_shape)
outer_model = keras.models.Model(
inputs=outer_inputs, outputs=inner_model(outer_inputs)
)
data_shape = (batch_size,) + input_shape
data = np.random.random_sample(data_shape)
keras_output = outer_model.predict(data)
_, out_tensor_name, engine = keras_to_tensorrt(
outer_model,
input_dims=input_shape,
max_batch_size=batch_size,
parser=parser,
)
tensorrt_output = engine.infer(data)[out_tensor_name].squeeze()
assert keras_output.shape == tensorrt_output.shape
assert np.allclose(keras_output, tensorrt_output, atol=1e-2)
@pytest.mark.parametrize("floatx", ["float32", "float16"])
    def test_get_model_input_dtype(self, tmpdir, floatx):
"""Test that get_model_input_dtype function returns the correct dtype."""
try:
model_filename = os.path.join(str(tmpdir), "model.h5")
keras.backend.set_floatx(floatx)
inputs = keras.layers.Input(shape=(4,))
x = keras.layers.Dense(4)(inputs)
y = keras.layers.Dense(4)(x)
model = keras.models.Model(inputs=inputs, outputs=y)
model.save(model_filename)
dtype = get_model_input_dtype(model_filename)
assert dtype == floatx
finally:
# Set Keras float type to the default float32, so that other tests are not affected.
keras.backend.set_floatx("float32")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/test_export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import google.protobuf.text_format as text_format
import keras
import numpy as np
logger = logging.getLogger(__name__) # noqa
from nvidia_tao_tf1.core.export.caffe import caffe_pb2
from nvidia_tao_tf1.core.export.caffe.net_spec import layers as L
from nvidia_tao_tf1.core.export.caffe.net_spec import NetSpec
class CaffeExporter(object):
"""A class to handle exporting a Keras model to Caffe."""
def __init__(self):
"""Initialization routine."""
self._exported_layers = {}
self._special_paddings = {}
def _add_activation_layer(
self, name, activation_type, previous_layer, parameters=None
):
"""Add an activation layer to the model.
Args:
name (str): name of the layer to add activation to.
activation_type (str): Keras string identifier of activation to return.
previous_layer (layer): layer to append activation to.
parameters (list): optional list of parameters needed for a given activation layer.
"""
if activation_type == "linear":
layer = None
elif activation_type == "sigmoid":
layer = L.Sigmoid(previous_layer)
elif activation_type == "softmax":
layer = L.Softmax(previous_layer)
elif activation_type == "softmax_with_axis":
# Requires one parameter: the axis over which to apply the softmax.
axis = parameters[0]
assert (
axis == 1
), "Only softmax over axis = 1 is supported in TensorRT at the moment."
layer = L.Softmax(previous_layer, axis=axis)
elif activation_type == "relu":
layer = L.ReLU(previous_layer)
elif activation_type == "tanh":
layer = L.TanH(previous_layer)
else:
raise ValueError("Unhandled activation type: %s" % activation_type)
if layer is not None:
name = self._get_activation_layer_name(name, activation_type)
self._add_layer(name, layer)
def _add_batchnorm_layer(self, keras_layer):
"""Add a batchnorm layer to the model.
Args:
keras_layer: the Keras layer to convert.
"""
assert keras_layer.scale, "Expect scaling to be enabled for batch norm."
assert keras_layer.center, "Expect centering to be enabled for batch norm."
caffe_layer = L.Scale(
self._get_previous_layer(keras_layer),
axis=keras_layer.axis,
bias_term=keras_layer.center,
)
self._add_layer(keras_layer.name, caffe_layer)
# Save weights.
weights = keras_layer.get_weights()
gamma, beta, moving_mean, moving_var = weights
# We need to divide the scaling factor (gamma) by the standard deviation.
denom = np.sqrt(moving_var + keras_layer.epsilon)
scale = gamma / denom
bias = beta - (gamma * moving_mean) / denom
self._params[keras_layer.name] = [scale, bias]
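    # The folding above relies on this identity (a worked sketch, not extra code):
    #   batch norm:  y = gamma * (x - moving_mean) / sqrt(moving_var + eps) + beta
    #   Scale layer: y = scale * x + bias
    # which match when scale = gamma / sqrt(moving_var + eps) and
    # bias = beta - gamma * moving_mean / sqrt(moving_var + eps).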
def _add_concatenate_layer(self, keras_layer):
"""Add a concatenation layer to the model.
Args:
keras_layer: the Keras layer to convert.
"""
previous_layers = keras_layer._inbound_nodes[-1].inbound_layers
logger.debug(
"Concatenate layers %s along axis=%d",
repr([l.name for l in previous_layers]),
keras_layer.axis,
)
bottom_layers = self._get_bottom_layers(previous_layers)
caffe_layer = L.Concat(*bottom_layers, axis=keras_layer.axis)
self._add_layer(keras_layer.name, caffe_layer)
def _add_conv_layer(self, keras_layer, pad_h=None, pad_w=None):
"""Add a conv layer to the model.
This applies to both regular and transpose convolutions.
Args:
keras_layer: the Keras layer to convert.
"""
kernel_h, kernel_w = keras_layer.kernel_size
stride_h, stride_w = keras_layer.strides
# Set padding according to border mode.
if pad_h is None or pad_w is None:
if keras_layer.padding == "valid":
pad_w, pad_h = 0, 0
elif keras_layer.padding == "same":
if type(keras_layer) == keras.layers.convolutional.Conv2D:
dilation_h, dilation_w = keras_layer.dilation_rate
# In the case of no dilation, i.e. dilation == 1, pad = kernel // 2.
pad_h = ((kernel_h - 1) * dilation_h + 1) // 2
pad_w = ((kernel_w - 1) * dilation_w + 1) // 2
else:
dilation_h, dilation_w = keras_layer.dilation_rate
# In the case of no dilation, i.e. dilation == 1, pad = kernel // 2.
pad_h = ((kernel_h - 1) * dilation_h + 1 - stride_h) // 2
pad_w = ((kernel_w - 1) * dilation_w + 1 - stride_w) // 2
else:
raise ValueError("Unknown padding type: %s" % keras_layer.padding)
else:
if keras_layer.padding == "valid":
pass
elif keras_layer.padding == "same":
dilation_h, dilation_w = keras_layer.dilation_rate
# In the case of no dilation, i.e. dilation == 1, pad = kernel // 2.
pad_h = pad_h + ((kernel_h - 1) * dilation_h + 1) // 2
pad_w = pad_w + ((kernel_w - 1) * dilation_w + 1) // 2
else:
raise ValueError("Unknown padding type: %s" % keras_layer.padding)
if type(keras_layer) == keras.layers.convolutional.Conv2D:
layer_func = L.Convolution
else:
layer_func = L.Deconvolution
caffe_layer = layer_func(
self._get_previous_layer(keras_layer),
convolution_param=dict(
num_output=keras_layer.filters,
kernel_h=kernel_h,
kernel_w=kernel_w,
pad_h=pad_h,
pad_w=pad_w,
stride_h=stride_h,
stride_w=stride_w,
dilation=list(keras_layer.dilation_rate),
),
)
self._add_layer(keras_layer.name, caffe_layer)
self._add_activation_layer(
keras_layer.name,
keras_layer.activation.__name__,
self._exported_layers[keras_layer.name],
)
# Save weights.
weights = keras_layer.get_weights()
kernels = weights[0]
biases = (
np.zeros((kernels.shape[-1]), dtype=kernels.dtype)
if len(weights) == 1
else weights[1]
)
# Convert kernel shape from Keras to Caffe ordering.
# For convolutions:
# Keras (h, w, n_in, n_out) to Caffe (n_out, n_in, h, w).
# For transpose convolutions:
# Keras (h, w, n_out, n_in) to Caffe (n_in, n_out, h, w).
# The same transpose operation works in either case.
kernels = kernels.transpose(3, 2, 0, 1)
self._params[keras_layer.name] = kernels, biases
def _add_dense_layer(self, keras_layer):
"""Add a dense layer to the model.
Args:
keras_layer: the Keras layer to convert.
"""
caffe_layer = L.InnerProduct(
self._get_previous_layer(keras_layer),
inner_product_param=dict(num_output=keras_layer.units),
)
self._add_layer(keras_layer.name, caffe_layer)
self._add_activation_layer(
keras_layer.name,
keras_layer.activation.__name__,
self._exported_layers[keras_layer.name],
)
# Save weights.
weights, biases = keras_layer.get_weights()
weights = weights.transpose(1, 0)
self._params[keras_layer.name] = weights, biases
def _add_eltwise_layer(self, keras_layer, operation):
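        """Add an element-wise layer to the model.
        Args:
            keras_layer: the Keras layer to convert.
            operation (str): one of "add", "subtract" or "multiply".
        """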
previous_layers = keras_layer._inbound_nodes[-1].inbound_layers
bottom_layers = self._get_bottom_layers(previous_layers)
if operation == "add":
operation = caffe_pb2.EltwiseParameter.SUM
elif operation == "subtract":
caffe_layer = L.Power(bottom_layers[1], power_param=dict(scale=-1))
self._add_layer(previous_layers[1].name + "_negate", caffe_layer)
bottom_layers[1] = caffe_layer
operation = caffe_pb2.EltwiseParameter.SUM
elif operation == "multiply":
operation = caffe_pb2.EltwiseParameter.PROD
else:
raise ValueError("Unsupported operation: %s" % operation)
caffe_layer = L.Eltwise(*bottom_layers, eltwise_param=dict(operation=operation))
self._add_layer(keras_layer.name, caffe_layer)
def _add_flatten_layer(self, keras_layer):
"""Add a flatten layer to the model.
Args:
keras_layer: the Keras layer to convert.
"""
caffe_layer = L.Flatten(self._get_previous_layer(keras_layer))
self._add_layer(keras_layer.name, caffe_layer)
def _add_input_layer(self, keras_layer, in_name):
"""Add an input layer.
Args:
keras_layer: the Keras layer to convert.
in_name: name to give to input layer.
"""
input_dim = keras_layer.batch_input_shape[1:]
        # To ensure Caffe-to-TensorRT export compatibility, TensorRT expects a
        # 4-dimensional input shape, so left-pad the shape with ones:
        # a length-C vector input becomes [1, 1, 1, C], and a channels-first
        # (C, H, W) image input becomes [1, C, H, W].
dim = ((1,) * (4 - len(input_dim))) + input_dim
caffe_layer = L.Input(shape=[dict(dim=list(dim))])
self._add_layer(in_name, caffe_layer)
logger.debug("Input shape: %s" % str(input_dim))
def _add_layer(self, name, layer):
"""Add a layer to the Caffe model."""
        # Assign via the tops dict rather than setattr(self._net_spec, name, layer),
        # so layer names cannot collide with NetSpec attribute names.
        self._net_spec.tops[name] = layer
self._exported_layers[name] = layer
def _add_pool2D_layer(self, keras_layer, pool_type):
"""Add a 2D pooling layer to the model.
Args:
keras_layer: the Keras layer to convert.
"""
kernel_h, kernel_w = keras_layer.pool_size
stride_h, stride_w = keras_layer.strides
# Set padding according to border mode.
if keras_layer.padding == "valid":
pad_w, pad_h = 0, 0
elif keras_layer.padding == "same":
            pad_w, pad_h = (kernel_w - 1) // 2, (kernel_h - 1) // 2
else:
raise ValueError("Unknown padding type: %s" % keras_layer.padding)
pool_num = caffe_pb2.PoolingParameter.PoolMethod.Value(pool_type)
caffe_layer = L.Pooling(
self._get_previous_layer(keras_layer),
pooling_param=dict(
pool=pool_num,
kernel_h=kernel_h,
kernel_w=kernel_w,
pad_h=pad_h,
pad_w=pad_w,
stride_h=stride_h,
stride_w=stride_w,
),
)
self._add_layer(keras_layer.name, caffe_layer)
def _add_reshape_layer(self, keras_layer):
"""Add a reshape layer to the model.
Args:
keras_layer: the Keras layer to convert.
"""
shape = keras_layer.target_shape
# Prepend a "0" to the shape to denote the fact that
# we want to keep the batch dimension unchanged.
caffe_shape = list((0,) + shape)
caffe_layer = L.Reshape(
self._get_previous_layer(keras_layer),
reshape_param=dict(shape=dict(dim=caffe_shape)),
)
self._add_layer(keras_layer.name, caffe_layer)
def _create_net_spec(self):
"""Create an empty Caffe ``NetSpec``."""
self._net_spec = NetSpec()
self._params = {}
@staticmethod
def _get_activation_layer_name(name, activation_type):
"""Return Caffe "top" name to assign to an activation layer.
Args:
activation_type (str): activation type.
"""
if activation_type == "softmax_with_axis":
layer_name = "%s" % (name)
else:
layer_name = "%s/%s" % (name, activation_type.capitalize())
return layer_name
def _get_bottom_layers(self, previous_layers):
return [
self._exported_layers[self._get_caffe_top_name(l)] for l in previous_layers
]
@classmethod
def _get_caffe_top_name(cls, keras_layer):
"""Get the Caffe "top" assigned with a layer.
This handles the case where an activation layer is implicitly added
during Caffe conversion.
For example if we have a Keras layer like:
keras.layers.Conv2D(name='conv2d', activation='relu')
Then we will get two Caffe layers:
- ``Convolution(bottom='...', top='conv2d')``
- ``ReLU(bottom='conv2d', top='conv2d/Relu')``
In that case this function will return ``conv2d/Relu``.
Args:
keras_layer: Keras layer to get corresponding Caffe top of.
"""
name = keras_layer.name
if (
hasattr(keras_layer, "activation")
and keras_layer.activation.__name__ != "linear"
):
name = cls._get_activation_layer_name(name, keras_layer.activation.__name__)
return name
def _get_previous_layer(self, layer):
"""Return the preceding layer.
Raises an error if the specified layer has multiple inbound layers.
Args:
layer: the layer to get preceding layer of.
"""
inbound_layers = layer._inbound_nodes[-1].inbound_layers
if len(inbound_layers) > 1:
raise RuntimeError(
"This function does not support multiple "
"inbound nodes. Got %s" % len(inbound_layers)
)
inbound_layer = inbound_layers[0]
name = self._get_caffe_top_name(inbound_layer)
return self._exported_layers[name]
def export(self, model, prototxt_filename, model_filename, output_node_names):
"""Export Keras model to Caffe.
This creates two files:
- A "proto" file that defines the topology of the exported model.
- A "caffemodel" file that includes the weights of the exported model.
Args:
model (Model): Keras model to export.
prototxt_filename (str): file to write exported proto to.
model_filename (str): file to write exported model weights to.
            output_node_names (list of str): list of model output node names,
                as Caffe layer names. If not provided, the model output layers are used.
Returns:
The names of the input and output nodes. These must be
passed to the TensorRT optimization tool to identify
input and output blobs.
"""
# Get names of output nodes.
# If output node names are not given, use the model output layers.
if output_node_names is None:
out_names = [
self._get_caffe_top_name(layer) for layer in model._output_layers
]
else:
out_names = output_node_names
# Create list to save input node names.
in_names = []
# Explore the graph in Breadth First Search fashion, starting from the input layers.
layers_to_explore = copy.copy(model._input_layers)
self._create_net_spec()
# Loop until we have explored all layers.
while layers_to_explore:
logger.debug(
"Layers to explore: %s",
repr([layer.name for layer in layers_to_explore]),
)
# Pick a layer to explore from the list.
layer = layers_to_explore.pop(0)
inbound_layers = layer._inbound_nodes[-1].inbound_layers
predecessor_names = [self._get_caffe_top_name(l) for l in inbound_layers]
if not all([l in self._exported_layers for l in predecessor_names]):
# Some of the inbound layers have not been explored yet.
# Skip this layer for now, it will come back to the list
# of layers to explore as the outbound layer of one of the
# yet unexplored layers.
continue
logger.debug("Processing layer %s: type=%s" % (layer.name, type(layer)))
# Layer-specific handling.
if type(layer) == keras.layers.InputLayer:
in_name = self._get_caffe_top_name(layer)
self._add_input_layer(layer, in_name)
in_names.append(in_name)
elif type(layer) in [
keras.layers.convolutional.Conv2D,
keras.layers.convolutional.Conv2DTranspose,
]:
                # Export Conv2D/Conv2DTranspose. If the convolution is immediately
                # followed by a ZeroPadding2D layer, bypass that layer and fold its
                # (symmetric) padding into the convolution that follows it.
conv_outbound_nodes = layer._outbound_nodes
layers_after_conv = [
node.outbound_layer for node in conv_outbound_nodes
]
if (
len(layers_after_conv) == 1
and type(layers_after_conv[0])
== keras.layers.convolutional.ZeroPadding2D
):
padding = layers_after_conv[0].padding
if (
padding[0][0] == padding[0][1]
and padding[1][0] == padding[1][1]
):
pad_h = padding[0][0]
pad_w = padding[1][0]
layer._outbound_nodes = layers_after_conv[0]._outbound_nodes
layer._outbound_nodes[0].inbound_layers = [layer]
self._special_paddings[
layer._outbound_nodes[0].outbound_layer.name
] = (pad_h, pad_w)
else:
raise ValueError("Asymmetric padding is not supported!")
if layer.name in self._special_paddings.keys():
self._add_conv_layer(
layer,
pad_h=self._special_paddings[layer.name][0],
pad_w=self._special_paddings[layer.name][1],
)
else:
self._add_conv_layer(layer)
elif type(layer) == keras.layers.normalization.BatchNormalization:
self._add_batchnorm_layer(layer)
elif type(layer) == keras.layers.core.Reshape:
self._add_reshape_layer(layer)
elif type(layer) in [
keras.layers.core.Dropout,
keras.layers.core.SpatialDropout2D,
]:
# Dropout is a pass-through during inference, just pretend we
# have exported this layer by pointing to the previous layer
# in the graph.
self._exported_layers[layer.name] = self._get_previous_layer(layer)
elif type(layer) == keras.layers.core.Activation:
self._add_activation_layer(
layer.name,
layer.activation.__name__,
self._get_previous_layer(layer),
)
elif type(layer) == keras.layers.core.Dense:
self._add_dense_layer(layer)
elif type(layer) == keras.layers.core.Flatten:
self._add_flatten_layer(layer)
elif type(layer) == keras.layers.pooling.MaxPooling2D:
self._add_pool2D_layer(layer, pool_type="MAX")
elif type(layer) == keras.layers.pooling.AveragePooling2D:
self._add_pool2D_layer(layer, pool_type="AVE")
elif type(layer) == keras.layers.Concatenate:
self._add_concatenate_layer(layer)
elif type(layer) == keras.engine.training.Model:
# This is a model-in-model type of architecture and this layer
# is a container for other layers. Look into the first
# layer and keep following outbound nodes.
layer = layer.layers[0]
elif type(layer) == keras.layers.Softmax:
self._add_activation_layer(
layer.name,
"softmax_with_axis",
self._get_previous_layer(layer),
parameters=[layer.axis],
)
elif type(layer) == keras.layers.Add:
self._add_eltwise_layer(layer, operation="add")
elif type(layer) == keras.layers.Subtract:
self._add_eltwise_layer(layer, operation="subtract")
elif type(layer) == keras.layers.Multiply:
self._add_eltwise_layer(layer, operation="multiply")
else:
raise ValueError("Unhandled layer type: %s" % type(layer))
outbound_nodes = layer._outbound_nodes
layers_to_explore.extend([node.outbound_layer for node in outbound_nodes])
if hasattr(layer, "data_format"):
if layer.data_format != "channels_first":
raise ValueError("Only 'channels_first' is supported.")
logger.debug("Explored layers: %s", repr(self._exported_layers.keys()))
# If output node names are provided, then remove layers after them. Start from the output
# nodes, move towards the input, and mark visited layers. Unmarked layers are removed.
if output_node_names is not None:
self._remove_layers_after_outputs(output_node_names)
self._save_protobuf(prototxt_filename)
self._save_weights(prototxt_filename, model_filename)
return in_names, out_names
def _remove_layers_after_outputs(self, output_node_names):
"""Remove unnecessary layers after the given output node names."""
self._marked_layers = set()
for output_node_name in output_node_names:
layer = self._exported_layers.get(output_node_name)
if layer is None:
raise KeyError(
"Output node %s does not exist in the Caffe model."
% output_node_name
)
# Mark the output layer and its inputs recursively.
self._mark_layers(layer)
# Find layers that were not marked.
exported_layers_set = set(self._exported_layers.values())
unmarked_layers = exported_layers_set.difference(self._marked_layers)
# Remove the unmarked layers from the Caffe NetSpec and dictionary of parameters.
if unmarked_layers:
# Get a mapping from the layer objects to layer names.
layer_to_name = {v: k for k, v in self._exported_layers.items()}
for unmarked_layer in unmarked_layers:
layer_name = layer_to_name[unmarked_layer]
self._net_spec.tops.pop(layer_name)
self._params.pop(
layer_name, None
) # Some layers do not have any parameters.
def _mark_layers(self, layer):
"""Mark layers to be exported by adding them to a set of marked layers."""
# Check if the path to the inputs is already traversed.
if layer in self._marked_layers:
return
self._marked_layers.add(layer)
# Mark recursively layers before the current layer.
for input_layer in layer.fn.inputs:
self._mark_layers(input_layer)
def _save_protobuf(self, prototxt_filename):
"""Write protobuf out."""
with open(prototxt_filename, "w") as f:
f.write(str(self._net_spec.to_proto()))
def _save_weights(self, prototxt_filename, model_filename):
"""Write weights out."""
net = caffe_pb2.NetParameter()
text_format.Merge(open(prototxt_filename, "r").read(), net)
for layer in net.layer:
layer.phase = caffe_pb2.TEST
name = layer.name
if name in self._params:
for source_param in self._params[name]:
blob = layer.blobs.add()
# Add dims.
for dim in source_param.shape:
blob.shape.dim.append(dim)
# Add blob.
blob.data.extend(source_param.flat)
with open(model_filename, "wb") as f:
f.write(net.SerializeToString())
def keras_to_caffe(model, prototxt_filename, model_filename, output_node_names=None):
"""Export a Keras model to Caffe.
This creates two files:
- A "proto" file that defines the topology of the exported model.
- A "caffemodel" file that includes the weights of the exported model.
Args:
model (Model): Keras model to export.
        prototxt_filename (str): file to write the exported proto to.
        model_filename (str): file to write the exported model weights to.
        output_node_names (list of str): list of model output node names,
            as Caffe layer names. If not provided, the model output layers
            are used.
Returns:
The names of the input and output nodes. These must be
passed to the TensorRT optimization tool to identify
input and output blobs.
"""
exporter = CaffeExporter()
in_names, out_names = exporter.export(
model, prototxt_filename, model_filename, output_node_names
)
# Return a string instead of a list if there is only one input node.
if len(in_names) == 1:
in_names = in_names[0]
# Return a string instead of a list if there is only one output node.
if len(out_names) == 1:
out_names = out_names[0]
return in_names, out_names
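# A minimal usage sketch (illustrative only; file names are hypothetical):
#   in_names, out_names = keras_to_caffe(
#       model,
#       prototxt_filename="/tmp/model.prototxt",
#       model_filename="/tmp/model.caffemodel",
#   )
#   # in_names and out_names identify the input/output blobs for TensorRT.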
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/caffe/caffe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# COPYRIGHT
#
# All contributions by the University of California:
# Copyright (c) 2014-2017 The Regents of the University of California (Regents)
# All rights reserved.
#
# All other contributions:
# Copyright (c) 2014-2017, the respective contributors
# All rights reserved.
#
# Caffe uses a shared copyright model: each contributor holds copyright over
# their contributions to Caffe. The project versioning records all such
# contribution and copyright details. If a contributor wants to further mark
# their specific copyright on a particular contribution, they should indicate
# their copyright solely in the commit message of the change when it is
# committed.
#
# LICENSE
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# CONTRIBUTION AGREEMENT
#
# By contributing to the BVLC/caffe repository through pull-request, comment,
# or otherwise, the contributor releases their content to the
# license and copyright terms herein.
#
"""Adaptation from Caffe's net_spec.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter, OrderedDict
from nvidia_tao_tf1.core.export.caffe import caffe_pb2
import six
def param_name_dict():
"""Find out the correspondence between layer names and parameter names."""
layer = caffe_pb2.LayerParameter()
# get all parameter names (typically underscore case) and corresponding
# type names (typically camel case), which contain the layer names
# (note that not all parameters correspond to layers, but we'll ignore that)
param_names = [f.name for f in layer.DESCRIPTOR.fields if f.name.endswith("_param")]
param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]
# strip the final '_param' or 'Parameter'
param_names = [s[: -len("_param")] for s in param_names]
param_type_names = [s[: -len("Parameter")] for s in param_type_names]
return dict(zip(param_type_names, param_names))
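# Illustrative only: the resulting mapping pairs protobuf message type names
# with their field names, e.g. {"Convolution": "convolution",
# "InnerProduct": "inner_product", ...}, which Function._to_proto uses to
# route keyword arguments into the matching *_param submessage.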
def to_proto(*tops):
"""Generate a NetParameter that contains all layers needed to compute all arguments."""
layers = OrderedDict()
autonames = Counter()
for top in tops:
top.fn._to_proto(layers, {}, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
def assign_proto(proto, name, val):
"""assign_proto method.
Assign a Python object to a protobuf message, based on the Python
type (in recursive fashion). Lists become repeated fields/messages, dicts
become messages, and other types are assigned directly. For convenience,
repeated fields whose values are not lists are converted to single-element
lists; e.g., `my_repeated_int_field=3` is converted to
`my_repeated_int_field=[3]`.
"""
is_repeated_field = hasattr(getattr(proto, name), "extend")
if is_repeated_field and not isinstance(val, list):
val = [val]
if isinstance(val, list):
if isinstance(val[0], dict):
for item in val:
proto_item = getattr(proto, name).add()
for k, v in six.iteritems(item):
assign_proto(proto_item, k, v)
else:
getattr(proto, name).extend(val)
elif isinstance(val, dict):
for k, v in six.iteritems(val):
assign_proto(getattr(proto, name), k, v)
else:
setattr(proto, name, val)
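# Illustrative only: under the rules above,
#   assign_proto(layer, "convolution_param", dict(num_output=8, dilation=1))
# sets layer.convolution_param.num_output = 8 and appends 1 to the repeated
# dilation field, because scalars destined for repeated fields are first
# wrapped into one-element lists.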
class Top(object):
"""A Top specifies a single output blob (which could be one of several produced by a layer)."""
def __init__(self, fn, n):
"""__init__ method."""
self.fn = fn
self.n = n
def to_proto(self):
"""Generate a NetParameter that contains all layers needed to computethis top."""
return to_proto(self)
def _to_proto(self, layers, names, autonames):
"""_to_proto method."""
return self.fn._to_proto(layers, names, autonames)
class Function(object):
"""Function object.
A Function specifies a layer, its parameters, and its inputs (which
are Tops from other layers).
"""
def __init__(self, type_name, inputs, params):
"""__init__ method."""
self.type_name = type_name
for index, inp in enumerate(inputs):
if not isinstance(inp, Top):
raise TypeError(
"%s input %d is not a Top (type is %s)"
% (type_name, index, type(inp))
)
self.inputs = inputs
self.params = params
self.ntop = self.params.get("ntop", 1)
# use del to make sure kwargs are not double-processed as layer params
if "ntop" in self.params:
del self.params["ntop"]
self.in_place = self.params.get("in_place", False)
if "in_place" in self.params:
del self.params["in_place"]
self.tops = tuple(Top(self, n) for n in range(self.ntop))
def _get_name(self, names, autonames):
if self not in names and self.ntop > 0:
names[self] = self._get_top_name(self.tops[0], names, autonames)
elif self not in names:
autonames[self.type_name] += 1
names[self] = self.type_name + str(autonames[self.type_name])
return names[self]
def _get_top_name(self, top, names, autonames):
if top not in names:
autonames[top.fn.type_name] += 1
names[top] = top.fn.type_name + str(autonames[top.fn.type_name])
return names[top]
def _to_proto(self, layers, names, autonames):
if self in layers:
return
bottom_names = []
for inp in self.inputs:
inp._to_proto(layers, names, autonames)
bottom_names.append(layers[inp.fn].top[inp.n])
layer = caffe_pb2.LayerParameter()
layer.type = self.type_name
layer.bottom.extend(bottom_names)
if self.in_place:
layer.top.extend(layer.bottom)
else:
for top in self.tops:
layer.top.append(self._get_top_name(top, names, autonames))
layer.name = self._get_name(names, autonames)
for k, v in six.iteritems(self.params):
# special case to handle generic *params
if k.endswith("param"):
assign_proto(layer, k, v)
else:
try:
assign_proto(
getattr(layer, _param_names[self.type_name] + "_param"), k, v
)
except (AttributeError, KeyError):
assign_proto(layer, k, v)
layers[self] = layer
class NetSpec(object):
"""NetSpec object.
A NetSpec contains a set of Tops (assigned directly as attributes).
Calling NetSpec.to_proto generates a NetParameter containing all of the
layers needed to produce all of the assigned Tops, using the assigned
names.
"""
def __init__(self):
"""__init__ method."""
super(NetSpec, self).__setattr__("tops", OrderedDict())
def __setattr__(self, name, value):
"""__setattr__ method."""
self.tops[name] = value
def __getattr__(self, name):
"""__getattr__ method."""
return self.tops[name]
def __setitem__(self, key, value):
"""__setitem__ method."""
self.__setattr__(key, value)
def __getitem__(self, item):
"""__getitem__ method."""
return self.__getattr__(item)
def to_proto(self):
"""to_proto method."""
names = {v: k for k, v in six.iteritems(self.tops)}
autonames = Counter()
layers = OrderedDict()
for _, top in six.iteritems(self.tops):
top._to_proto(layers, names, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
class Layers(object):
"""Layers object.
A Layers object is a pseudo-module which generates functions that specify
layers; e.g., Layers().Convolution(bottom, kernel_size=3) will produce a Top
specifying a 3x3 convolution applied to bottom.
"""
def __getattr__(self, name):
"""__getattr__ method."""
def layer_fn(*args, **kwargs):
fn = Function(name, args, kwargs)
if fn.ntop == 0:
return fn
if fn.ntop == 1:
return fn.tops[0]
return fn.tops
return layer_fn
_param_names = param_name_dict()
layers = Layers()
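# A minimal sketch (illustrative only) of composing a network with NetSpec:
#   n = NetSpec()
#   n.data = layers.Input(shape=[dict(dim=[1, 3, 8, 8])])
#   n.conv1 = layers.Convolution(
#       n.data,
#       convolution_param=dict(num_output=4, kernel_h=3, kernel_w=3,
#                              pad_h=1, pad_w=1, stride_h=1, stride_w=1),
#   )
#   print(n.to_proto())  # NetParameter with the Input and Convolution layers.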
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/caffe/net_spec.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus caffe export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.export.caffe import caffe_pb2
from nvidia_tao_tf1.core.export.caffe import net_spec
from nvidia_tao_tf1.core.export.caffe.caffe import CaffeExporter, keras_to_caffe
__all__ = ("CaffeExporter", "keras_to_caffe", "caffe_pb2", "net_spec")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/caffe/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: caffe.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="caffe.proto",
package="caffe",
serialized_pb=_b(
'\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter"\xc3\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\x12\x1f\n\x11layer_wise_reduce\x18) \x01(\x08:\x04true"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01"\x82\x14\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g 
\x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12\x33\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x19.caffe.ParameterParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0frecurrent_param\x18\x92\x01 \x01(\x0b\x32\x19.caffe.RecurrentParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 
\x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse"\xc2\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08"B\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 
\x03(\r"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00"8\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 
\x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse"\xc0\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 \x01(\r:\x01\x30\x12-\n\rweight_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 
\x01(\x05:\x01\x31"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01'
),
)
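# The `serialized_pb` blob above is the wire-format FileDescriptorProto that
# protoc compiled from caffe.proto; every enum and message descriptor below
# indexes into it via its `serialized_start`/`serialized_end` byte offsets,
# so the blob, not the repeated Python literals, is the canonical definition.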
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PHASE = _descriptor.EnumDescriptor(
name="Phase",
full_name="caffe.Phase",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="TRAIN", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="TEST", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=15403,
serialized_end=15431,
)
_sym_db.RegisterEnumDescriptor(_PHASE)
Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE)
TRAIN = 0
TEST = 1
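# `Phase` wraps the top-level phase enum so callers can convert between names
# and numbers; protoc additionally flattens the values into the module-level
# constants TRAIN and TEST above. A minimal sketch of the standard
# enum_type_wrapper API (kept as a comment so importing this generated module
# stays side-effect free; `caffe_pb2` is an assumed import name for this file):
#
#   import caffe_pb2
#   assert caffe_pb2.Phase.Value("TRAIN") == caffe_pb2.TRAIN  # name -> number
#   assert caffe_pb2.Phase.Name(1) == "TEST"                  # number -> name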
_FILLERPARAMETER_VARIANCENORM = _descriptor.EnumDescriptor(
name="VarianceNorm",
full_name="caffe.FillerParameter.VarianceNorm",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="FAN_IN", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="FAN_OUT", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="AVERAGE", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=658,
serialized_end=710,
)
_sym_db.RegisterEnumDescriptor(_FILLERPARAMETER_VARIANCENORM)
_SOLVERPARAMETER_SNAPSHOTFORMAT = _descriptor.EnumDescriptor(
name="SnapshotFormat",
full_name="caffe.SolverParameter.SnapshotFormat",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="HDF5", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="BINARYPROTO", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2171,
serialized_end=2214,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SNAPSHOTFORMAT)
_SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor(
name="SolverMode",
full_name="caffe.SolverParameter.SolverMode",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="CPU", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="GPU", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2216,
serialized_end=2246,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERMODE)
_SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor(
name="SolverType",
full_name="caffe.SolverParameter.SolverType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="SGD", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="NESTEROV", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADAGRAD", index=2, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="RMSPROP", index=3, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADADELTA", index=4, number=4, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ADAM", index=5, number=5, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2248,
serialized_end=2333,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERTYPE)
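# SolverParameter carries both the legacy `solver_type` enum above and the
# newer string-valued `type` field (default "SGD") visible in the serialized
# proto; upstream Caffe deprecated the enum in favor of the string spelling.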
_PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor(
name="DimCheckMode",
full_name="caffe.ParamSpec.DimCheckMode",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="STRICT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PERMISSIVE", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2764,
serialized_end=2806,
)
_sym_db.RegisterEnumDescriptor(_PARAMSPEC_DIMCHECKMODE)
_LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor(
name="NormalizationMode",
full_name="caffe.LossParameter.NormalizationMode",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="FULL", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="VALID", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="BATCH_SIZE", index=2, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="NONE", index=3, number=3, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=5687,
serialized_end=5753,
)
_sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE)
_CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.ConvolutionParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE)
_DATAPARAMETER_DB = _descriptor.EnumDescriptor(
name="DB",
full_name="caffe.DataParameter.DB",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="LEVELDB", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="LMDB", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=7079,
serialized_end=7106,
)
_sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB)
_ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor(
name="EltwiseOp",
full_name="caffe.EltwiseParameter.EltwiseOp",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="PROD", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SUM", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MAX", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=7446,
serialized_end=7485,
)
_sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP)
_HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor(
name="Norm",
full_name="caffe.HingeLossParameter.Norm",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="L1", index=0, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="L2", index=1, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=8020,
serialized_end=8042,
)
_sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM)
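# Note: HingeLossParameter.Norm is 1-based (L1 = 1, L2 = 2, no zero value);
# the field's declared default in the serialized proto above is L1.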
_LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor(
name="NormRegion",
full_name="caffe.LRNParameter.NormRegion",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="ACROSS_CHANNELS", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="WITHIN_CHANNEL", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=8926,
serialized_end=8979,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION)
_LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.LRNParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE)
_POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name="PoolMethod",
full_name="caffe.PoolingParameter.PoolMethod",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="MAX", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="AVE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="STOCHASTIC", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=9603,
serialized_end=9649,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD)
_POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.PoolingParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE)
_REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor(
name="ReductionOp",
full_name="caffe.ReductionParameter.ReductionOp",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="SUM", index=0, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ASUM", index=1, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SUMSQ", index=2, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MEAN", index=3, number=4, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=10189,
serialized_end=10242,
)
_sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP)
_RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.ReLUParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE)
_SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.SigmoidParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE)
_SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.SoftmaxParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE)
_TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.TanHParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE)
_SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name="PoolMethod",
full_name="caffe.SPPParameter.PoolMethod",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="MAX", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="AVE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="STOCHASTIC", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=9603,
serialized_end=9649,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD)
_SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name="Engine",
full_name="caffe.SPPParameter.Engine",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="DEFAULT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CAFFE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CUDNN", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=6718,
serialized_end=6761,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE)
_V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor(
name="LayerType",
full_name="caffe.V1LayerParameter.LayerType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="NONE", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ABSVAL", index=1, number=35, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ACCURACY", index=2, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ARGMAX", index=3, number=30, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="BNLL", index=4, number=2, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CONCAT", index=5, number=3, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CONTRASTIVE_LOSS", index=6, number=37, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="CONVOLUTION", index=7, number=4, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DATA", index=8, number=5, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DECONVOLUTION", index=9, number=39, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DROPOUT", index=10, number=6, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DUMMY_DATA", index=11, number=32, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="EUCLIDEAN_LOSS", index=12, number=7, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="ELTWISE", index=13, number=25, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="EXP", index=14, number=38, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="FLATTEN", index=15, number=8, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="HDF5_DATA", index=16, number=9, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="HDF5_OUTPUT", index=17, number=10, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="HINGE_LOSS", index=18, number=28, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="IM2COL", index=19, number=11, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="IMAGE_DATA", index=20, number=12, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="INFOGAIN_LOSS", index=21, number=13, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="INNER_PRODUCT", index=22, number=14, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="LRN", index=23, number=15, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MEMORY_DATA", index=24, number=29, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MULTINOMIAL_LOGISTIC_LOSS",
index=25,
number=16,
options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="MVN", index=26, number=34, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="POOLING", index=27, number=17, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="POWER", index=28, number=26, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="RELU", index=29, number=18, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SIGMOID", index=30, number=19, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SIGMOID_CROSS_ENTROPY_LOSS",
index=31,
number=27,
options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="SILENCE", index=32, number=36, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SOFTMAX", index=33, number=20, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SOFTMAX_LOSS", index=34, number=21, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SPLIT", index=35, number=22, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="SLICE", index=36, number=33, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="TANH", index=37, number=23, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="WINDOW_DATA", index=38, number=24, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="THRESHOLD", index=39, number=31, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=13644,
serialized_end=14244,
)
_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE)
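# V1LayerParameter.LayerType keeps Caffe's frozen legacy wire numbers, so
# `index` (declaration order) and `number` (historical ID) intentionally
# diverge, e.g. ABSVAL is index 1 but number 35.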
_V1LAYERPARAMETER_DIMCHECKMODE = _descriptor.EnumDescriptor(
name="DimCheckMode",
full_name="caffe.V1LayerParameter.DimCheckMode",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="STRICT", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PERMISSIVE", index=1, number=1, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=2764,
serialized_end=2806,
)
_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_DIMCHECKMODE)
_V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name="PoolMethod",
full_name="caffe.V0LayerParameter.PoolMethod",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="MAX", index=0, number=0, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="AVE", index=1, number=1, options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="STOCHASTIC", index=2, number=2, options=None, type=None
),
],
containing_type=None,
options=None,
serialized_start=9603,
serialized_end=9649,
)
_sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD)
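# --- Message descriptors -----------------------------------------------------
# The registrations above covered the top-level Phase enum and the messages'
# nested enums; from here on the module declares one Descriptor per message,
# with a FieldDescriptor per proto field (`type`/`cpp_type` follow the
# descriptor module's TYPE_* and CPPTYPE_* numbering).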
_BLOBSHAPE = _descriptor.Descriptor(
name="BlobShape",
full_name="caffe.BlobShape",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="dim",
full_name="caffe.BlobShape.dim",
index=0,
number=1,
type=3,
cpp_type=2,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
),
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=22,
serialized_end=50,
)
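# BlobShape is the N-D replacement for the legacy 4-D (num, channels, height,
# width) layout; `dim` is a repeated int64 whose _b("\020\001") option bytes
# above encode FieldOptions [packed = true]. A hypothetical usage sketch, once
# the message classes are built at the end of this module:
#
#   shape = caffe_pb2.BlobShape()
#   shape.dim.extend([1, 3, 224, 224])   # NCHW
#   payload = shape.SerializeToString()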
_BLOBPROTO = _descriptor.Descriptor(
name="BlobProto",
full_name="caffe.BlobProto",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="shape",
full_name="caffe.BlobProto.shape",
index=0,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="data",
full_name="caffe.BlobProto.data",
index=1,
number=5,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
),
),
_descriptor.FieldDescriptor(
name="diff",
full_name="caffe.BlobProto.diff",
index=2,
number=6,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
),
),
_descriptor.FieldDescriptor(
name="double_data",
full_name="caffe.BlobProto.double_data",
index=3,
number=8,
type=1,
cpp_type=5,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
),
),
_descriptor.FieldDescriptor(
name="double_diff",
full_name="caffe.BlobProto.double_diff",
index=4,
number=9,
type=1,
cpp_type=5,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
),
),
_descriptor.FieldDescriptor(
name="num",
full_name="caffe.BlobProto.num",
index=5,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="channels",
full_name="caffe.BlobProto.channels",
index=6,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="height",
full_name="caffe.BlobProto.height",
index=7,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="width",
full_name="caffe.BlobProto.width",
index=8,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=53,
serialized_end=257,
)
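# BlobProto carries its payload in packed repeated `data`/`diff` (float) and
# `double_data`/`double_diff` (double) fields, plus the legacy scalar
# num/channels/height/width fields (each defaulting to 0) alongside the newer
# `shape` submessage. Readers conventionally prefer `shape` when `shape.dim`
# is non-empty and fall back to the four legacy ints.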
_BLOBPROTOVECTOR = _descriptor.Descriptor(
name="BlobProtoVector",
full_name="caffe.BlobProtoVector",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="blobs",
full_name="caffe.BlobProtoVector.blobs",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=259,
serialized_end=309,
)
_DATUM = _descriptor.Descriptor(
name="Datum",
full_name="caffe.Datum",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="channels",
full_name="caffe.Datum.channels",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="height",
full_name="caffe.Datum.height",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="width",
full_name="caffe.Datum.width",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="data",
full_name="caffe.Datum.data",
index=3,
number=4,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b(""),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="label",
full_name="caffe.Datum.label",
index=4,
number=5,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="float_data",
full_name="caffe.Datum.float_data",
index=5,
number=6,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="encoded",
full_name="caffe.Datum.encoded",
index=6,
number=7,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=312,
serialized_end=441,
)
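# A Datum is one labeled sample: raw bytes in `data` (e.g. HWC uint8 pixels)
# or floats in `float_data`, by convention not both. Hypothetical construction
# sketch (field names match the descriptor above):
#
#   datum = caffe_pb2.Datum(channels=3, height=32, width=32, label=7)
#   datum.data = raw_pixel_bytes          # expected len: 3 * 32 * 32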
_FILLERPARAMETER = _descriptor.Descriptor(
name="FillerParameter",
full_name="caffe.FillerParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="type",
full_name="caffe.FillerParameter.type",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("constant").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="value",
full_name="caffe.FillerParameter.value",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="min",
full_name="caffe.FillerParameter.min",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="max",
full_name="caffe.FillerParameter.max",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mean",
full_name="caffe.FillerParameter.mean",
index=4,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="std",
full_name="caffe.FillerParameter.std",
index=5,
number=6,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="sparse",
full_name="caffe.FillerParameter.sparse",
index=6,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="variance_norm",
full_name="caffe.FillerParameter.variance_norm",
index=7,
number=8,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_FILLERPARAMETER_VARIANCENORM],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=444,
serialized_end=710,
)
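# FillerParameter's defaults mirror the proto: type="constant", value=0,
# min=0/max=1 (uniform fillers), mean=0/std=1 (gaussian), sparse=-1
# (disabled), and variance_norm=FAN_IN for xavier/msra-style fillers.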
_NETPARAMETER = _descriptor.Descriptor(
name="NetParameter",
full_name="caffe.NetParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="caffe.NetParameter.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="input",
full_name="caffe.NetParameter.input",
index=1,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="input_shape",
full_name="caffe.NetParameter.input_shape",
index=2,
number=8,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="input_dim",
full_name="caffe.NetParameter.input_dim",
index=3,
number=4,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="force_backward",
full_name="caffe.NetParameter.force_backward",
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="state",
full_name="caffe.NetParameter.state",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="debug_info",
full_name="caffe.NetParameter.debug_info",
index=6,
number=7,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="layer",
full_name="caffe.NetParameter.layer",
index=7,
number=100,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="layers",
full_name="caffe.NetParameter.layers",
index=8,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=713,
serialized_end=983,
)
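# NetParameter is the top-level network message: `layer` (field 100) holds
# modern LayerParameter entries while `layers` (field 2) retains V1-era
# definitions for backward compatibility. A minimal, hypothetical loading
# sketch (file names are assumptions; text_format is protobuf's standard text
# parser), kept as a comment to avoid import-time side effects:
#
#   from google.protobuf import text_format
#   arch = caffe_pb2.NetParameter()
#   with open("deploy.prototxt") as f:            # assumed path
#       text_format.Merge(f.read(), arch)         # .prototxt is text format
#   weights = caffe_pb2.NetParameter()
#   with open("net.caffemodel", "rb") as f:       # assumed path
#       weights.ParseFromString(f.read())         # .caffemodel is binary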
_SOLVERPARAMETER = _descriptor.Descriptor(
name="SolverParameter",
full_name="caffe.SolverParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="net",
full_name="caffe.SolverParameter.net",
index=0,
number=24,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="net_param",
full_name="caffe.SolverParameter.net_param",
index=1,
number=25,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="train_net",
full_name="caffe.SolverParameter.train_net",
index=2,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="test_net",
full_name="caffe.SolverParameter.test_net",
index=3,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="train_net_param",
full_name="caffe.SolverParameter.train_net_param",
index=4,
number=21,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="test_net_param",
full_name="caffe.SolverParameter.test_net_param",
index=5,
number=22,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="train_state",
full_name="caffe.SolverParameter.train_state",
index=6,
number=26,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="test_state",
full_name="caffe.SolverParameter.test_state",
index=7,
number=27,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="test_iter",
full_name="caffe.SolverParameter.test_iter",
index=8,
number=3,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="test_interval",
full_name="caffe.SolverParameter.test_interval",
index=9,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="test_compute_loss",
full_name="caffe.SolverParameter.test_compute_loss",
index=10,
number=19,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="test_initialization",
full_name="caffe.SolverParameter.test_initialization",
index=11,
number=32,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="base_lr",
full_name="caffe.SolverParameter.base_lr",
index=12,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="display",
full_name="caffe.SolverParameter.display",
index=13,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="average_loss",
full_name="caffe.SolverParameter.average_loss",
index=14,
number=33,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="max_iter",
full_name="caffe.SolverParameter.max_iter",
index=15,
number=7,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="iter_size",
full_name="caffe.SolverParameter.iter_size",
index=16,
number=36,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="lr_policy",
full_name="caffe.SolverParameter.lr_policy",
index=17,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="gamma",
full_name="caffe.SolverParameter.gamma",
index=18,
number=9,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="power",
full_name="caffe.SolverParameter.power",
index=19,
number=10,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="momentum",
full_name="caffe.SolverParameter.momentum",
index=20,
number=11,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_decay",
full_name="caffe.SolverParameter.weight_decay",
index=21,
number=12,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="regularization_type",
full_name="caffe.SolverParameter.regularization_type",
index=22,
number=29,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("L2").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stepsize",
full_name="caffe.SolverParameter.stepsize",
index=23,
number=13,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stepvalue",
full_name="caffe.SolverParameter.stepvalue",
index=24,
number=34,
type=5,
cpp_type=1,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="clip_gradients",
full_name="caffe.SolverParameter.clip_gradients",
index=25,
number=35,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="snapshot",
full_name="caffe.SolverParameter.snapshot",
index=26,
number=14,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="snapshot_prefix",
full_name="caffe.SolverParameter.snapshot_prefix",
index=27,
number=15,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="snapshot_diff",
full_name="caffe.SolverParameter.snapshot_diff",
index=28,
number=16,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="snapshot_format",
full_name="caffe.SolverParameter.snapshot_format",
index=29,
number=37,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="solver_mode",
full_name="caffe.SolverParameter.solver_mode",
index=30,
number=17,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="device_id",
full_name="caffe.SolverParameter.device_id",
index=31,
number=18,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="random_seed",
full_name="caffe.SolverParameter.random_seed",
index=32,
number=20,
type=3,
cpp_type=2,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="type",
full_name="caffe.SolverParameter.type",
index=33,
number=40,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("SGD").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="delta",
full_name="caffe.SolverParameter.delta",
index=34,
number=31,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1e-08,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="momentum2",
full_name="caffe.SolverParameter.momentum2",
index=35,
number=39,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.999,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="rms_decay",
full_name="caffe.SolverParameter.rms_decay",
index=36,
number=38,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.99,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="debug_info",
full_name="caffe.SolverParameter.debug_info",
index=37,
number=23,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="snapshot_after_train",
full_name="caffe.SolverParameter.snapshot_after_train",
index=38,
number=28,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="solver_type",
full_name="caffe.SolverParameter.solver_type",
index=39,
number=30,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="layer_wise_reduce",
full_name="caffe.SolverParameter.layer_wise_reduce",
index=40,
number=41,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[
_SOLVERPARAMETER_SNAPSHOTFORMAT,
_SOLVERPARAMETER_SOLVERMODE,
_SOLVERPARAMETER_SOLVERTYPE,
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=986,
serialized_end=2333,
)
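# SolverParameter above captures the full solver configuration
# (learning-rate policy, momentum, snapshotting, solver type). A hedged
# sketch reusing the text_format import from the NetParameter note above:
#
#     solver = caffe_pb2.SolverParameter()
#     text_format.Merge('base_lr: 0.01 lr_policy: "step" stepsize: 1000', solver)
#     # Unset optional fields report their declared proto2 defaults,
#     # e.g. solver.momentum2 == 0.999 and solver.type == "SGD".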
_SOLVERSTATE = _descriptor.Descriptor(
name="SolverState",
full_name="caffe.SolverState",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="iter",
full_name="caffe.SolverState.iter",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="learned_net",
full_name="caffe.SolverState.learned_net",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="history",
full_name="caffe.SolverState.history",
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="current_step",
full_name="caffe.SolverState.current_step",
index=3,
number=4,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=2335,
serialized_end=2443,
)
_NETSTATE = _descriptor.Descriptor(
name="NetState",
full_name="caffe.NetState",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="phase",
full_name="caffe.NetState.phase",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="level",
full_name="caffe.NetState.level",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stage",
full_name="caffe.NetState.stage",
index=2,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=2445,
serialized_end=2523,
)
_NETSTATERULE = _descriptor.Descriptor(
name="NetStateRule",
full_name="caffe.NetStateRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="phase",
full_name="caffe.NetStateRule.phase",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="min_level",
full_name="caffe.NetStateRule.min_level",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="max_level",
full_name="caffe.NetStateRule.max_level",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stage",
full_name="caffe.NetStateRule.stage",
index=3,
number=4,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="not_stage",
full_name="caffe.NetStateRule.not_stage",
index=4,
number=5,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=2525,
serialized_end=2640,
)
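# NetStateRule is the target of LayerParameter's "include"/"exclude"
# entries: a layer is active only when the running NetState matches the
# rule's phase, falls inside [min_level, max_level], and satisfies the
# stage/not_stage string filters.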
_PARAMSPEC = _descriptor.Descriptor(
name="ParamSpec",
full_name="caffe.ParamSpec",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="caffe.ParamSpec.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="share_mode",
full_name="caffe.ParamSpec.share_mode",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="lr_mult",
full_name="caffe.ParamSpec.lr_mult",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="decay_mult",
full_name="caffe.ParamSpec.decay_mult",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_PARAMSPEC_DIMCHECKMODE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=2643,
serialized_end=2806,
)
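# ParamSpec attaches per-blob solver multipliers to a layer's parameters:
# the effective learning rate for a blob is base_lr * lr_mult and the
# effective weight decay is weight_decay * decay_mult (both default to 1).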
_LAYERPARAMETER = _descriptor.Descriptor(
name="LayerParameter",
full_name="caffe.LayerParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="caffe.LayerParameter.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="type",
full_name="caffe.LayerParameter.type",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bottom",
full_name="caffe.LayerParameter.bottom",
index=2,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="top",
full_name="caffe.LayerParameter.top",
index=3,
number=4,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="phase",
full_name="caffe.LayerParameter.phase",
index=4,
number=10,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="loss_weight",
full_name="caffe.LayerParameter.loss_weight",
index=5,
number=5,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="param",
full_name="caffe.LayerParameter.param",
index=6,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="blobs",
full_name="caffe.LayerParameter.blobs",
index=7,
number=7,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="propagate_down",
full_name="caffe.LayerParameter.propagate_down",
index=8,
number=11,
type=8,
cpp_type=7,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="include",
full_name="caffe.LayerParameter.include",
index=9,
number=8,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="exclude",
full_name="caffe.LayerParameter.exclude",
index=10,
number=9,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="transform_param",
full_name="caffe.LayerParameter.transform_param",
index=11,
number=100,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="loss_param",
full_name="caffe.LayerParameter.loss_param",
index=12,
number=101,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="accuracy_param",
full_name="caffe.LayerParameter.accuracy_param",
index=13,
number=102,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="argmax_param",
full_name="caffe.LayerParameter.argmax_param",
index=14,
number=103,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="batch_norm_param",
full_name="caffe.LayerParameter.batch_norm_param",
index=15,
number=139,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_param",
full_name="caffe.LayerParameter.bias_param",
index=16,
number=141,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="concat_param",
full_name="caffe.LayerParameter.concat_param",
index=17,
number=104,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="contrastive_loss_param",
full_name="caffe.LayerParameter.contrastive_loss_param",
index=18,
number=105,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="convolution_param",
full_name="caffe.LayerParameter.convolution_param",
index=19,
number=106,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="crop_param",
full_name="caffe.LayerParameter.crop_param",
index=20,
number=144,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="data_param",
full_name="caffe.LayerParameter.data_param",
index=21,
number=107,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="dropout_param",
full_name="caffe.LayerParameter.dropout_param",
index=22,
number=108,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="dummy_data_param",
full_name="caffe.LayerParameter.dummy_data_param",
index=23,
number=109,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="eltwise_param",
full_name="caffe.LayerParameter.eltwise_param",
index=24,
number=110,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="elu_param",
full_name="caffe.LayerParameter.elu_param",
index=25,
number=140,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="embed_param",
full_name="caffe.LayerParameter.embed_param",
index=26,
number=137,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="exp_param",
full_name="caffe.LayerParameter.exp_param",
index=27,
number=111,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="flatten_param",
full_name="caffe.LayerParameter.flatten_param",
index=28,
number=135,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="hdf5_data_param",
full_name="caffe.LayerParameter.hdf5_data_param",
index=29,
number=112,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="hdf5_output_param",
full_name="caffe.LayerParameter.hdf5_output_param",
index=30,
number=113,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="hinge_loss_param",
full_name="caffe.LayerParameter.hinge_loss_param",
index=31,
number=114,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="image_data_param",
full_name="caffe.LayerParameter.image_data_param",
index=32,
number=115,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="infogain_loss_param",
full_name="caffe.LayerParameter.infogain_loss_param",
index=33,
number=116,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="inner_product_param",
full_name="caffe.LayerParameter.inner_product_param",
index=34,
number=117,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="input_param",
full_name="caffe.LayerParameter.input_param",
index=35,
number=143,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="log_param",
full_name="caffe.LayerParameter.log_param",
index=36,
number=134,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="lrn_param",
full_name="caffe.LayerParameter.lrn_param",
index=37,
number=118,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="memory_data_param",
full_name="caffe.LayerParameter.memory_data_param",
index=38,
number=119,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mvn_param",
full_name="caffe.LayerParameter.mvn_param",
index=39,
number=120,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="parameter_param",
full_name="caffe.LayerParameter.parameter_param",
index=40,
number=145,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pooling_param",
full_name="caffe.LayerParameter.pooling_param",
index=41,
number=121,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="power_param",
full_name="caffe.LayerParameter.power_param",
index=42,
number=122,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="prelu_param",
full_name="caffe.LayerParameter.prelu_param",
index=43,
number=131,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="python_param",
full_name="caffe.LayerParameter.python_param",
index=44,
number=130,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="recurrent_param",
full_name="caffe.LayerParameter.recurrent_param",
index=45,
number=146,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="reduction_param",
full_name="caffe.LayerParameter.reduction_param",
index=46,
number=136,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="relu_param",
full_name="caffe.LayerParameter.relu_param",
index=47,
number=123,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="reshape_param",
full_name="caffe.LayerParameter.reshape_param",
index=48,
number=133,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale_param",
full_name="caffe.LayerParameter.scale_param",
index=49,
number=142,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="sigmoid_param",
full_name="caffe.LayerParameter.sigmoid_param",
index=50,
number=124,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="softmax_param",
full_name="caffe.LayerParameter.softmax_param",
index=51,
number=125,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="spp_param",
full_name="caffe.LayerParameter.spp_param",
index=52,
number=132,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="slice_param",
full_name="caffe.LayerParameter.slice_param",
index=53,
number=126,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="tanh_param",
full_name="caffe.LayerParameter.tanh_param",
index=54,
number=127,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="threshold_param",
full_name="caffe.LayerParameter.threshold_param",
index=55,
number=128,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="tile_param",
full_name="caffe.LayerParameter.tile_param",
index=56,
number=138,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="window_data_param",
full_name="caffe.LayerParameter.window_data_param",
index=57,
number=129,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=2809,
serialized_end=5371,
)
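# LayerParameter above is the per-layer container: identity ("name"/"type"),
# graph wiring ("bottom"/"top"), learned "blobs", and one type-specific
# *_param submessage per layer kind (field numbers 100 and up). A sketch of
# walking a net parsed as in the NetParameter note above:
#
#     for layer in net.layer:
#         if layer.type == "Convolution":
#             print(layer.name, layer.convolution_param.num_output)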
_TRANSFORMATIONPARAMETER = _descriptor.Descriptor(
name="TransformationParameter",
full_name="caffe.TransformationParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.TransformationParameter.scale",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mirror",
full_name="caffe.TransformationParameter.mirror",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="crop_size",
full_name="caffe.TransformationParameter.crop_size",
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mean_file",
full_name="caffe.TransformationParameter.mean_file",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mean_value",
full_name="caffe.TransformationParameter.mean_value",
index=4,
number=5,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="force_color",
full_name="caffe.TransformationParameter.force_color",
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="force_gray",
full_name="caffe.TransformationParameter.force_gray",
index=6,
number=7,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=5374,
serialized_end=5556,
)
_LOSSPARAMETER = _descriptor.Descriptor(
name="LossParameter",
full_name="caffe.LossParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="ignore_label",
full_name="caffe.LossParameter.ignore_label",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="normalization",
full_name="caffe.LossParameter.normalization",
index=1,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="normalize",
full_name="caffe.LossParameter.normalize",
index=2,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_LOSSPARAMETER_NORMALIZATIONMODE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=5559,
serialized_end=5753,
)
_ACCURACYPARAMETER = _descriptor.Descriptor(
name="AccuracyParameter",
full_name="caffe.AccuracyParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="top_k",
full_name="caffe.AccuracyParameter.top_k",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.AccuracyParameter.axis",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="ignore_label",
full_name="caffe.AccuracyParameter.ignore_label",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=5755,
serialized_end=5831,
)
_ARGMAXPARAMETER = _descriptor.Descriptor(
name="ArgMaxParameter",
full_name="caffe.ArgMaxParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="out_max_val",
full_name="caffe.ArgMaxParameter.out_max_val",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="top_k",
full_name="caffe.ArgMaxParameter.top_k",
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.ArgMaxParameter.axis",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=5833,
serialized_end=5910,
)
_CONCATPARAMETER = _descriptor.Descriptor(
name="ConcatParameter",
full_name="caffe.ConcatParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.ConcatParameter.axis",
index=0,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="concat_dim",
full_name="caffe.ConcatParameter.concat_dim",
index=1,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=5912,
serialized_end=5969,
)
_BATCHNORMPARAMETER = _descriptor.Descriptor(
name="BatchNormParameter",
full_name="caffe.BatchNormParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="use_global_stats",
full_name="caffe.BatchNormParameter.use_global_stats",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="moving_average_fraction",
full_name="caffe.BatchNormParameter.moving_average_fraction",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.999,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="eps",
full_name="caffe.BatchNormParameter.eps",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1e-05,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=5971,
serialized_end=6077,
)
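# BatchNormParameter: "use_global_stats" selects between mini-batch
# statistics (training) and the stored moving averages (inference), while
# "eps" (default 1e-05) guards the variance denominator against division
# by zero.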
_BIASPARAMETER = _descriptor.Descriptor(
name="BiasParameter",
full_name="caffe.BiasParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.BiasParameter.axis",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="num_axes",
full_name="caffe.BiasParameter.num_axes",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="filler",
full_name="caffe.BiasParameter.filler",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=6079,
serialized_end=6172,
)
_CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor(
name="ContrastiveLossParameter",
full_name="caffe.ContrastiveLossParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="margin",
full_name="caffe.ContrastiveLossParameter.margin",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="legacy_version",
full_name="caffe.ContrastiveLossParameter.legacy_version",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=6174,
serialized_end=6250,
)
_CONVOLUTIONPARAMETER = _descriptor.Descriptor(
name="ConvolutionParameter",
full_name="caffe.ConvolutionParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="num_output",
full_name="caffe.ConvolutionParameter.num_output",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_term",
full_name="caffe.ConvolutionParameter.bias_term",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pad",
full_name="caffe.ConvolutionParameter.pad",
index=2,
number=3,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="kernel_size",
full_name="caffe.ConvolutionParameter.kernel_size",
index=3,
number=4,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stride",
full_name="caffe.ConvolutionParameter.stride",
index=4,
number=6,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="dilation",
full_name="caffe.ConvolutionParameter.dilation",
index=5,
number=18,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pad_h",
full_name="caffe.ConvolutionParameter.pad_h",
index=6,
number=9,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pad_w",
full_name="caffe.ConvolutionParameter.pad_w",
index=7,
number=10,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="kernel_h",
full_name="caffe.ConvolutionParameter.kernel_h",
index=8,
number=11,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="kernel_w",
full_name="caffe.ConvolutionParameter.kernel_w",
index=9,
number=12,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stride_h",
full_name="caffe.ConvolutionParameter.stride_h",
index=10,
number=13,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stride_w",
full_name="caffe.ConvolutionParameter.stride_w",
index=11,
number=14,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="group",
full_name="caffe.ConvolutionParameter.group",
index=12,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_filler",
full_name="caffe.ConvolutionParameter.weight_filler",
index=13,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_filler",
full_name="caffe.ConvolutionParameter.bias_filler",
index=14,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.ConvolutionParameter.engine",
index=15,
number=15,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.ConvolutionParameter.axis",
index=16,
number=16,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="force_nd_im2col",
full_name="caffe.ConvolutionParameter.force_nd_im2col",
index=17,
number=17,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_CONVOLUTIONPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=6253,
serialized_end=6761,
)
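# ConvolutionParameter accepts either the repeated "kernel_size"/"pad"/
# "stride" fields or the explicit per-dimension "kernel_h"/"kernel_w"
# (and pad_h/pad_w, stride_h/stride_w) variants, so readers must check
# both spellings. A hedged helper sketch (hypothetical function name):
#
#     def conv_kernel_hw(conv_param):
#         """Return (kernel_h, kernel_w) under either Caffe convention."""
#         if conv_param.HasField("kernel_h") or conv_param.HasField("kernel_w"):
#             return conv_param.kernel_h, conv_param.kernel_w
#         ks = list(conv_param.kernel_size)
#         if len(ks) == 1:  # a single value applies to both spatial dims
#             return ks[0], ks[0]
#         return ks[0], ks[1]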
_CROPPARAMETER = _descriptor.Descriptor(
name="CropParameter",
full_name="caffe.CropParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.CropParameter.axis",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=2,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="offset",
full_name="caffe.CropParameter.offset",
index=1,
number=2,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=6763,
serialized_end=6811,
)
_DATAPARAMETER = _descriptor.Descriptor(
name="DataParameter",
full_name="caffe.DataParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="caffe.DataParameter.source",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="batch_size",
full_name="caffe.DataParameter.batch_size",
index=1,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="rand_skip",
full_name="caffe.DataParameter.rand_skip",
index=2,
number=7,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="backend",
full_name="caffe.DataParameter.backend",
index=3,
number=8,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.DataParameter.scale",
index=4,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mean_file",
full_name="caffe.DataParameter.mean_file",
index=5,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="crop_size",
full_name="caffe.DataParameter.crop_size",
index=6,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mirror",
full_name="caffe.DataParameter.mirror",
index=7,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="force_encoded_color",
full_name="caffe.DataParameter.force_encoded_color",
index=8,
number=9,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="prefetch",
full_name="caffe.DataParameter.prefetch",
index=9,
number=10,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=4,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_DATAPARAMETER_DB],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=6814,
serialized_end=7106,
)
_DROPOUTPARAMETER = _descriptor.Descriptor(
name="DropoutParameter",
full_name="caffe.DropoutParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="dropout_ratio",
full_name="caffe.DropoutParameter.dropout_ratio",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7108,
serialized_end=7154,
)
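# caffe.DummyDataParameter: synthetic input layer; shapes come either from the
# repeated "shape" messages or from the legacy num/channels/height/width
# lists, with per-blob fillers in "data_filler".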
_DUMMYDATAPARAMETER = _descriptor.Descriptor(
name="DummyDataParameter",
full_name="caffe.DummyDataParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="data_filler",
full_name="caffe.DummyDataParameter.data_filler",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="shape",
full_name="caffe.DummyDataParameter.shape",
index=1,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="num",
full_name="caffe.DummyDataParameter.num",
index=2,
number=2,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="channels",
full_name="caffe.DummyDataParameter.channels",
index=3,
number=3,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="height",
full_name="caffe.DummyDataParameter.height",
index=4,
number=4,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="width",
full_name="caffe.DummyDataParameter.width",
index=5,
number=5,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7157,
serialized_end=7317,
)
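# caffe.EltwiseParameter: element-wise layer with an operation enum
# (_ELTWISEPARAMETER_ELTWISEOP), optional per-bottom "coeff" weights, and the
# stable_prod_grad switch.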
_ELTWISEPARAMETER = _descriptor.Descriptor(
name="EltwiseParameter",
full_name="caffe.EltwiseParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="operation",
full_name="caffe.EltwiseParameter.operation",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="coeff",
full_name="caffe.EltwiseParameter.coeff",
index=1,
number=2,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stable_prod_grad",
full_name="caffe.EltwiseParameter.stable_prod_grad",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_ELTWISEPARAMETER_ELTWISEOP],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7320,
serialized_end=7485,
)
_ELUPARAMETER = _descriptor.Descriptor(
name="ELUParameter",
full_name="caffe.ELUParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="alpha",
full_name="caffe.ELUParameter.alpha",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7487,
serialized_end=7519,
)
_EMBEDPARAMETER = _descriptor.Descriptor(
name="EmbedParameter",
full_name="caffe.EmbedParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="num_output",
full_name="caffe.EmbedParameter.num_output",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="input_dim",
full_name="caffe.EmbedParameter.input_dim",
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_term",
full_name="caffe.EmbedParameter.bias_term",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_filler",
full_name="caffe.EmbedParameter.weight_filler",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_filler",
full_name="caffe.EmbedParameter.bias_filler",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7522,
serialized_end=7694,
)
_EXPPARAMETER = _descriptor.Descriptor(
name="ExpParameter",
full_name="caffe.ExpParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="base",
full_name="caffe.ExpParameter.base",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.ExpParameter.scale",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="shift",
full_name="caffe.ExpParameter.shift",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7696,
serialized_end=7764,
)
_FLATTENPARAMETER = _descriptor.Descriptor(
name="FlattenParameter",
full_name="caffe.FlattenParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.FlattenParameter.axis",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="end_axis",
full_name="caffe.FlattenParameter.end_axis",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7766,
serialized_end=7823,
)
_HDF5DATAPARAMETER = _descriptor.Descriptor(
name="HDF5DataParameter",
full_name="caffe.HDF5DataParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="caffe.HDF5DataParameter.source",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="batch_size",
full_name="caffe.HDF5DataParameter.batch_size",
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="shuffle",
full_name="caffe.HDF5DataParameter.shuffle",
index=2,
number=3,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7825,
serialized_end=7904,
)
_HDF5OUTPUTPARAMETER = _descriptor.Descriptor(
name="HDF5OutputParameter",
full_name="caffe.HDF5OutputParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="file_name",
full_name="caffe.HDF5OutputParameter.file_name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7906,
serialized_end=7946,
)
_HINGELOSSPARAMETER = _descriptor.Descriptor(
name="HingeLossParameter",
full_name="caffe.HingeLossParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="norm",
full_name="caffe.HingeLossParameter.norm",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[_HINGELOSSPARAMETER_NORM],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=7948,
serialized_end=8042,
)
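# caffe.ImageDataParameter: image-list data layer -- source listing plus
# root_folder, optional resize via new_height/new_width, shuffle/is_color
# flags, and the same legacy transformation fields as DataParameter.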
_IMAGEDATAPARAMETER = _descriptor.Descriptor(
name="ImageDataParameter",
full_name="caffe.ImageDataParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="caffe.ImageDataParameter.source",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="batch_size",
full_name="caffe.ImageDataParameter.batch_size",
index=1,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="rand_skip",
full_name="caffe.ImageDataParameter.rand_skip",
index=2,
number=7,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="shuffle",
full_name="caffe.ImageDataParameter.shuffle",
index=3,
number=8,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="new_height",
full_name="caffe.ImageDataParameter.new_height",
index=4,
number=9,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="new_width",
full_name="caffe.ImageDataParameter.new_width",
index=5,
number=10,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="is_color",
full_name="caffe.ImageDataParameter.is_color",
index=6,
number=11,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.ImageDataParameter.scale",
index=7,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mean_file",
full_name="caffe.ImageDataParameter.mean_file",
index=8,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="crop_size",
full_name="caffe.ImageDataParameter.crop_size",
index=9,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mirror",
full_name="caffe.ImageDataParameter.mirror",
index=10,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="root_folder",
full_name="caffe.ImageDataParameter.root_folder",
index=11,
number=12,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=8045,
serialized_end=8324,
)
_INFOGAINLOSSPARAMETER = _descriptor.Descriptor(
name="InfogainLossParameter",
full_name="caffe.InfogainLossParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="caffe.InfogainLossParameter.source",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.InfogainLossParameter.axis",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=8326,
serialized_end=8382,
)
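# caffe.InnerProductParameter: fully connected layer -- num_output, bias_term,
# weight/bias fillers, the starting "axis", and "transpose" controlling the
# stored weight-matrix layout.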
_INNERPRODUCTPARAMETER = _descriptor.Descriptor(
name="InnerProductParameter",
full_name="caffe.InnerProductParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="num_output",
full_name="caffe.InnerProductParameter.num_output",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_term",
full_name="caffe.InnerProductParameter.bias_term",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_filler",
full_name="caffe.InnerProductParameter.weight_filler",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_filler",
full_name="caffe.InnerProductParameter.bias_filler",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.InnerProductParameter.axis",
index=4,
number=5,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="transpose",
full_name="caffe.InnerProductParameter.transpose",
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=8385,
serialized_end=8588,
)
_INPUTPARAMETER = _descriptor.Descriptor(
name="InputParameter",
full_name="caffe.InputParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="shape",
full_name="caffe.InputParameter.shape",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=8590,
serialized_end=8639,
)
_LOGPARAMETER = _descriptor.Descriptor(
name="LogParameter",
full_name="caffe.LogParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="base",
full_name="caffe.LogParameter.base",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.LogParameter.scale",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="shift",
full_name="caffe.LogParameter.shift",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=8641,
serialized_end=8709,
)
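# caffe.LRNParameter: local response normalization -- local_size, alpha, beta,
# and k, plus nested enums choosing the normalization region and the compute
# engine.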
_LRNPARAMETER = _descriptor.Descriptor(
name="LRNParameter",
full_name="caffe.LRNParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="local_size",
full_name="caffe.LRNParameter.local_size",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="alpha",
full_name="caffe.LRNParameter.alpha",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="beta",
full_name="caffe.LRNParameter.beta",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.75,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="norm_region",
full_name="caffe.LRNParameter.norm_region",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="k",
full_name="caffe.LRNParameter.k",
index=4,
number=5,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.LRNParameter.engine",
index=5,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_LRNPARAMETER_NORMREGION, _LRNPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=8712,
serialized_end=9024,
)
_MEMORYDATAPARAMETER = _descriptor.Descriptor(
name="MemoryDataParameter",
full_name="caffe.MemoryDataParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="batch_size",
full_name="caffe.MemoryDataParameter.batch_size",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="channels",
full_name="caffe.MemoryDataParameter.channels",
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="height",
full_name="caffe.MemoryDataParameter.height",
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="width",
full_name="caffe.MemoryDataParameter.width",
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=9026,
serialized_end=9116,
)
_MVNPARAMETER = _descriptor.Descriptor(
name="MVNParameter",
full_name="caffe.MVNParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="normalize_variance",
full_name="caffe.MVNParameter.normalize_variance",
index=0,
number=1,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="across_channels",
full_name="caffe.MVNParameter.across_channels",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="eps",
full_name="caffe.MVNParameter.eps",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1e-09,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=9118,
serialized_end=9218,
)
_PARAMETERPARAMETER = _descriptor.Descriptor(
name="ParameterParameter",
full_name="caffe.ParameterParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="shape",
full_name="caffe.ParameterParameter.shape",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=9220,
serialized_end=9273,
)
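# caffe.PoolingParameter: pooling method enum plus kernel/pad/stride, each
# settable as a single square value or as separate *_h/*_w fields, an engine
# enum, and a global_pooling override.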
_POOLINGPARAMETER = _descriptor.Descriptor(
name="PoolingParameter",
full_name="caffe.PoolingParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="pool",
full_name="caffe.PoolingParameter.pool",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pad",
full_name="caffe.PoolingParameter.pad",
index=1,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pad_h",
full_name="caffe.PoolingParameter.pad_h",
index=2,
number=9,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pad_w",
full_name="caffe.PoolingParameter.pad_w",
index=3,
number=10,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="kernel_size",
full_name="caffe.PoolingParameter.kernel_size",
index=4,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="kernel_h",
full_name="caffe.PoolingParameter.kernel_h",
index=5,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="kernel_w",
full_name="caffe.PoolingParameter.kernel_w",
index=6,
number=6,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stride",
full_name="caffe.PoolingParameter.stride",
index=7,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stride_h",
full_name="caffe.PoolingParameter.stride_h",
index=8,
number=7,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stride_w",
full_name="caffe.PoolingParameter.stride_w",
index=9,
number=8,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.PoolingParameter.engine",
index=10,
number=11,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="global_pooling",
full_name="caffe.PoolingParameter.global_pooling",
index=11,
number=12,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_POOLINGPARAMETER_POOLMETHOD, _POOLINGPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=9276,
serialized_end=9694,
)
_POWERPARAMETER = _descriptor.Descriptor(
name="PowerParameter",
full_name="caffe.PowerParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="power",
full_name="caffe.PowerParameter.power",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.PowerParameter.scale",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="shift",
full_name="caffe.PowerParameter.shift",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=9696,
serialized_end=9766,
)
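# caffe.PythonParameter: Python layer binding -- "module" and "layer" name the
# class to load and "param_str" carries an opaque config string; in upstream
# caffe, share_in_parallel controls sharing the layer across parallel workers.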
_PYTHONPARAMETER = _descriptor.Descriptor(
name="PythonParameter",
full_name="caffe.PythonParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="module",
full_name="caffe.PythonParameter.module",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="layer",
full_name="caffe.PythonParameter.layer",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="param_str",
full_name="caffe.PythonParameter.param_str",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="share_in_parallel",
full_name="caffe.PythonParameter.share_in_parallel",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=9768,
serialized_end=9871,
)
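# caffe.RecurrentParameter: shared options for recurrent layers -- num_output,
# weight/bias fillers, a debug_info flag, and expose_hidden (which, per
# upstream caffe, exposes hidden state as extra bottom/top blobs).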
_RECURRENTPARAMETER = _descriptor.Descriptor(
name="RecurrentParameter",
full_name="caffe.RecurrentParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="num_output",
full_name="caffe.RecurrentParameter.num_output",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_filler",
full_name="caffe.RecurrentParameter.weight_filler",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_filler",
full_name="caffe.RecurrentParameter.bias_filler",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="debug_info",
full_name="caffe.RecurrentParameter.debug_info",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="expose_hidden",
full_name="caffe.RecurrentParameter.expose_hidden",
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=9874,
serialized_end=10066,
)
_REDUCTIONPARAMETER = _descriptor.Descriptor(
name="ReductionParameter",
full_name="caffe.ReductionParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="operation",
full_name="caffe.ReductionParameter.operation",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.ReductionParameter.axis",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="coeff",
full_name="caffe.ReductionParameter.coeff",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_REDUCTIONPARAMETER_REDUCTIONOP],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10069,
serialized_end=10242,
)
_RELUPARAMETER = _descriptor.Descriptor(
name="ReLUParameter",
full_name="caffe.ReLUParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="negative_slope",
full_name="caffe.ReLUParameter.negative_slope",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.ReLUParameter.engine",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_RELUPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10245,
serialized_end=10386,
)
_RESHAPEPARAMETER = _descriptor.Descriptor(
name="ReshapeParameter",
full_name="caffe.ReshapeParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="shape",
full_name="caffe.ReshapeParameter.shape",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.ReshapeParameter.axis",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="num_axes",
full_name="caffe.ReshapeParameter.num_axes",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=-1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10388,
serialized_end=10478,
)
_SCALEPARAMETER = _descriptor.Descriptor(
name="ScaleParameter",
full_name="caffe.ScaleParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.ScaleParameter.axis",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="num_axes",
full_name="caffe.ScaleParameter.num_axes",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="filler",
full_name="caffe.ScaleParameter.filler",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_term",
full_name="caffe.ScaleParameter.bias_term",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_filler",
full_name="caffe.ScaleParameter.bias_filler",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10481,
serialized_end=10646,
)
_SIGMOIDPARAMETER = _descriptor.Descriptor(
name="SigmoidParameter",
full_name="caffe.SigmoidParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.SigmoidParameter.engine",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[_SIGMOIDPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10648,
serialized_end=10768,
)
_SLICEPARAMETER = _descriptor.Descriptor(
name="SliceParameter",
full_name="caffe.SliceParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.SliceParameter.axis",
index=0,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="slice_point",
full_name="caffe.SliceParameter.slice_point",
index=1,
number=2,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="slice_dim",
full_name="caffe.SliceParameter.slice_dim",
index=2,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10770,
serialized_end=10846,
)
_SOFTMAXPARAMETER = _descriptor.Descriptor(
name="SoftmaxParameter",
full_name="caffe.SoftmaxParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.SoftmaxParameter.engine",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.SoftmaxParameter.axis",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_SOFTMAXPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10849,
serialized_end=10986,
)
_TANHPARAMETER = _descriptor.Descriptor(
name="TanHParameter",
full_name="caffe.TanHParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.TanHParameter.engine",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[_TANHPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=10988,
serialized_end=11102,
)
_TILEPARAMETER = _descriptor.Descriptor(
name="TileParameter",
full_name="caffe.TileParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="axis",
full_name="caffe.TileParameter.axis",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="tiles",
full_name="caffe.TileParameter.tiles",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=11104,
serialized_end=11151,
)
_THRESHOLDPARAMETER = _descriptor.Descriptor(
name="ThresholdParameter",
full_name="caffe.ThresholdParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="threshold",
full_name="caffe.ThresholdParameter.threshold",
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
)
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=11153,
serialized_end=11195,
)
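# caffe.WindowDataParameter: R-CNN style window sampling -- fg/bg overlap
# thresholds, fg_fraction, context_pad, and a crop_mode string defaulting to
# "warp".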
_WINDOWDATAPARAMETER = _descriptor.Descriptor(
name="WindowDataParameter",
full_name="caffe.WindowDataParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="source",
full_name="caffe.WindowDataParameter.source",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.WindowDataParameter.scale",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mean_file",
full_name="caffe.WindowDataParameter.mean_file",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="batch_size",
full_name="caffe.WindowDataParameter.batch_size",
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="crop_size",
full_name="caffe.WindowDataParameter.crop_size",
index=4,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mirror",
full_name="caffe.WindowDataParameter.mirror",
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="fg_threshold",
full_name="caffe.WindowDataParameter.fg_threshold",
index=6,
number=7,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bg_threshold",
full_name="caffe.WindowDataParameter.bg_threshold",
index=7,
number=8,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="fg_fraction",
full_name="caffe.WindowDataParameter.fg_fraction",
index=8,
number=9,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.25,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="context_pad",
full_name="caffe.WindowDataParameter.context_pad",
index=9,
number=10,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="crop_mode",
full_name="caffe.WindowDataParameter.crop_mode",
index=10,
number=11,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("warp").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="cache_images",
full_name="caffe.WindowDataParameter.cache_images",
index=11,
number=12,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="root_folder",
full_name="caffe.WindowDataParameter.root_folder",
index=12,
number=13,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=11198,
serialized_end=11519,
)
_SPPPARAMETER = _descriptor.Descriptor(
name="SPPParameter",
full_name="caffe.SPPParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="pyramid_height",
full_name="caffe.SPPParameter.pyramid_height",
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pool",
full_name="caffe.SPPParameter.pool",
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="engine",
full_name="caffe.SPPParameter.engine",
index=2,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_SPPPARAMETER_POOLMETHOD, _SPPPARAMETER_ENGINE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=11522,
serialized_end=11757,
)
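# caffe.V1LayerParameter: the deprecated legacy layer message (superseded by
# LayerParameter) retained so old prototxt/caffemodel files still parse; it
# carries one optional *_param submessage per layer type alongside blobs and
# per-blob lr/decay multipliers.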
_V1LAYERPARAMETER = _descriptor.Descriptor(
name="V1LayerParameter",
full_name="caffe.V1LayerParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="bottom",
full_name="caffe.V1LayerParameter.bottom",
index=0,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="top",
full_name="caffe.V1LayerParameter.top",
index=1,
number=3,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="name",
full_name="caffe.V1LayerParameter.name",
index=2,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="include",
full_name="caffe.V1LayerParameter.include",
index=3,
number=32,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="exclude",
full_name="caffe.V1LayerParameter.exclude",
index=4,
number=33,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="type",
full_name="caffe.V1LayerParameter.type",
index=5,
number=5,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="blobs",
full_name="caffe.V1LayerParameter.blobs",
index=6,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="param",
full_name="caffe.V1LayerParameter.param",
index=7,
number=1001,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="blob_share_mode",
full_name="caffe.V1LayerParameter.blob_share_mode",
index=8,
number=1002,
type=14,
cpp_type=8,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="blobs_lr",
full_name="caffe.V1LayerParameter.blobs_lr",
index=9,
number=7,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_decay",
full_name="caffe.V1LayerParameter.weight_decay",
index=10,
number=8,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="loss_weight",
full_name="caffe.V1LayerParameter.loss_weight",
index=11,
number=35,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="accuracy_param",
full_name="caffe.V1LayerParameter.accuracy_param",
index=12,
number=27,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="argmax_param",
full_name="caffe.V1LayerParameter.argmax_param",
index=13,
number=23,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="concat_param",
full_name="caffe.V1LayerParameter.concat_param",
index=14,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="contrastive_loss_param",
full_name="caffe.V1LayerParameter.contrastive_loss_param",
index=15,
number=40,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="convolution_param",
full_name="caffe.V1LayerParameter.convolution_param",
index=16,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="data_param",
full_name="caffe.V1LayerParameter.data_param",
index=17,
number=11,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="dropout_param",
full_name="caffe.V1LayerParameter.dropout_param",
index=18,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="dummy_data_param",
full_name="caffe.V1LayerParameter.dummy_data_param",
index=19,
number=26,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="eltwise_param",
full_name="caffe.V1LayerParameter.eltwise_param",
index=20,
number=24,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="exp_param",
full_name="caffe.V1LayerParameter.exp_param",
index=21,
number=41,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="hdf5_data_param",
full_name="caffe.V1LayerParameter.hdf5_data_param",
index=22,
number=13,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="hdf5_output_param",
full_name="caffe.V1LayerParameter.hdf5_output_param",
index=23,
number=14,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="hinge_loss_param",
full_name="caffe.V1LayerParameter.hinge_loss_param",
index=24,
number=29,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="image_data_param",
full_name="caffe.V1LayerParameter.image_data_param",
index=25,
number=15,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="infogain_loss_param",
full_name="caffe.V1LayerParameter.infogain_loss_param",
index=26,
number=16,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="inner_product_param",
full_name="caffe.V1LayerParameter.inner_product_param",
index=27,
number=17,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="lrn_param",
full_name="caffe.V1LayerParameter.lrn_param",
index=28,
number=18,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="memory_data_param",
full_name="caffe.V1LayerParameter.memory_data_param",
index=29,
number=22,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mvn_param",
full_name="caffe.V1LayerParameter.mvn_param",
index=30,
number=34,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pooling_param",
full_name="caffe.V1LayerParameter.pooling_param",
index=31,
number=19,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="power_param",
full_name="caffe.V1LayerParameter.power_param",
index=32,
number=21,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="relu_param",
full_name="caffe.V1LayerParameter.relu_param",
index=33,
number=30,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="sigmoid_param",
full_name="caffe.V1LayerParameter.sigmoid_param",
index=34,
number=38,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="softmax_param",
full_name="caffe.V1LayerParameter.softmax_param",
index=35,
number=39,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="slice_param",
full_name="caffe.V1LayerParameter.slice_param",
index=36,
number=31,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="tanh_param",
full_name="caffe.V1LayerParameter.tanh_param",
index=37,
number=37,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="threshold_param",
full_name="caffe.V1LayerParameter.threshold_param",
index=38,
number=25,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="window_data_param",
full_name="caffe.V1LayerParameter.window_data_param",
index=39,
number=20,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="transform_param",
full_name="caffe.V1LayerParameter.transform_param",
index=40,
number=36,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="loss_param",
full_name="caffe.V1LayerParameter.loss_param",
index=41,
number=42,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="layer",
full_name="caffe.V1LayerParameter.layer",
index=42,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_V1LAYERPARAMETER_LAYERTYPE, _V1LAYERPARAMETER_DIMCHECKMODE],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=11760,
serialized_end=14288,
)
_V0LAYERPARAMETER = _descriptor.Descriptor(
name="V0LayerParameter",
full_name="caffe.V0LayerParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="caffe.V0LayerParameter.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="type",
full_name="caffe.V0LayerParameter.type",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="num_output",
full_name="caffe.V0LayerParameter.num_output",
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="biasterm",
full_name="caffe.V0LayerParameter.biasterm",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=True,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_filler",
full_name="caffe.V0LayerParameter.weight_filler",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="bias_filler",
full_name="caffe.V0LayerParameter.bias_filler",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pad",
full_name="caffe.V0LayerParameter.pad",
index=6,
number=7,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="kernelsize",
full_name="caffe.V0LayerParameter.kernelsize",
index=7,
number=8,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="group",
full_name="caffe.V0LayerParameter.group",
index=8,
number=9,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="stride",
full_name="caffe.V0LayerParameter.stride",
index=9,
number=10,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="pool",
full_name="caffe.V0LayerParameter.pool",
index=10,
number=11,
type=14,
cpp_type=8,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="dropout_ratio",
full_name="caffe.V0LayerParameter.dropout_ratio",
index=11,
number=12,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="local_size",
full_name="caffe.V0LayerParameter.local_size",
index=12,
number=13,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="alpha",
full_name="caffe.V0LayerParameter.alpha",
index=13,
number=14,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="beta",
full_name="caffe.V0LayerParameter.beta",
index=14,
number=15,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.75,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="k",
full_name="caffe.V0LayerParameter.k",
index=15,
number=22,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="source",
full_name="caffe.V0LayerParameter.source",
index=16,
number=16,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="scale",
full_name="caffe.V0LayerParameter.scale",
index=17,
number=17,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="meanfile",
full_name="caffe.V0LayerParameter.meanfile",
index=18,
number=18,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="batchsize",
full_name="caffe.V0LayerParameter.batchsize",
index=19,
number=19,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="cropsize",
full_name="caffe.V0LayerParameter.cropsize",
index=20,
number=20,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="mirror",
full_name="caffe.V0LayerParameter.mirror",
index=21,
number=21,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="blobs",
full_name="caffe.V0LayerParameter.blobs",
index=22,
number=50,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="blobs_lr",
full_name="caffe.V0LayerParameter.blobs_lr",
index=23,
number=51,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="weight_decay",
full_name="caffe.V0LayerParameter.weight_decay",
index=24,
number=52,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="rand_skip",
full_name="caffe.V0LayerParameter.rand_skip",
index=25,
number=53,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="det_fg_threshold",
full_name="caffe.V0LayerParameter.det_fg_threshold",
index=26,
number=54,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="det_bg_threshold",
full_name="caffe.V0LayerParameter.det_bg_threshold",
index=27,
number=55,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.5,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="det_fg_fraction",
full_name="caffe.V0LayerParameter.det_fg_fraction",
index=28,
number=56,
type=2,
cpp_type=6,
label=1,
has_default_value=True,
default_value=0.25,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="det_context_pad",
full_name="caffe.V0LayerParameter.det_context_pad",
index=29,
number=58,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="det_crop_mode",
full_name="caffe.V0LayerParameter.det_crop_mode",
index=30,
number=59,
type=9,
cpp_type=9,
label=1,
has_default_value=True,
default_value=_b("warp").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="new_num",
full_name="caffe.V0LayerParameter.new_num",
index=31,
number=60,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="new_channels",
full_name="caffe.V0LayerParameter.new_channels",
index=32,
number=61,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="new_height",
full_name="caffe.V0LayerParameter.new_height",
index=33,
number=62,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="new_width",
full_name="caffe.V0LayerParameter.new_width",
index=34,
number=63,
type=5,
cpp_type=1,
label=1,
has_default_value=True,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="shuffle_images",
full_name="caffe.V0LayerParameter.shuffle_images",
index=35,
number=64,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="concat_dim",
full_name="caffe.V0LayerParameter.concat_dim",
index=36,
number=65,
type=13,
cpp_type=3,
label=1,
has_default_value=True,
default_value=1,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="hdf5_output_param",
full_name="caffe.V0LayerParameter.hdf5_output_param",
index=37,
number=1001,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[_V0LAYERPARAMETER_POOLMETHOD],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=14291,
serialized_end=15312,
)
_PRELUPARAMETER = _descriptor.Descriptor(
name="PReLUParameter",
full_name="caffe.PReLUParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="filler",
full_name="caffe.PReLUParameter.filler",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="channel_shared",
full_name="caffe.PReLUParameter.channel_shared",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=True,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[],
serialized_start=15314,
serialized_end=15401,
)
_BLOBPROTO.fields_by_name["shape"].message_type = _BLOBSHAPE
_BLOBPROTOVECTOR.fields_by_name["blobs"].message_type = _BLOBPROTO
_FILLERPARAMETER.fields_by_name[
"variance_norm"
].enum_type = _FILLERPARAMETER_VARIANCENORM
_FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER
_NETPARAMETER.fields_by_name["input_shape"].message_type = _BLOBSHAPE
_NETPARAMETER.fields_by_name["state"].message_type = _NETSTATE
_NETPARAMETER.fields_by_name["layer"].message_type = _LAYERPARAMETER
_NETPARAMETER.fields_by_name["layers"].message_type = _V1LAYERPARAMETER
_SOLVERPARAMETER.fields_by_name["net_param"].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name["train_net_param"].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name["test_net_param"].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name["train_state"].message_type = _NETSTATE
_SOLVERPARAMETER.fields_by_name["test_state"].message_type = _NETSTATE
_SOLVERPARAMETER.fields_by_name[
"snapshot_format"
].enum_type = _SOLVERPARAMETER_SNAPSHOTFORMAT
_SOLVERPARAMETER.fields_by_name["solver_mode"].enum_type = _SOLVERPARAMETER_SOLVERMODE
_SOLVERPARAMETER.fields_by_name["solver_type"].enum_type = _SOLVERPARAMETER_SOLVERTYPE
_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER
_SOLVERSTATE.fields_by_name["history"].message_type = _BLOBPROTO
_NETSTATE.fields_by_name["phase"].enum_type = _PHASE
_NETSTATERULE.fields_by_name["phase"].enum_type = _PHASE
_PARAMSPEC.fields_by_name["share_mode"].enum_type = _PARAMSPEC_DIMCHECKMODE
_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC
_LAYERPARAMETER.fields_by_name["phase"].enum_type = _PHASE
_LAYERPARAMETER.fields_by_name["param"].message_type = _PARAMSPEC
_LAYERPARAMETER.fields_by_name["blobs"].message_type = _BLOBPROTO
_LAYERPARAMETER.fields_by_name["include"].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name["exclude"].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name[
"transform_param"
].message_type = _TRANSFORMATIONPARAMETER
_LAYERPARAMETER.fields_by_name["loss_param"].message_type = _LOSSPARAMETER
_LAYERPARAMETER.fields_by_name["accuracy_param"].message_type = _ACCURACYPARAMETER
_LAYERPARAMETER.fields_by_name["argmax_param"].message_type = _ARGMAXPARAMETER
_LAYERPARAMETER.fields_by_name["batch_norm_param"].message_type = _BATCHNORMPARAMETER
_LAYERPARAMETER.fields_by_name["bias_param"].message_type = _BIASPARAMETER
_LAYERPARAMETER.fields_by_name["concat_param"].message_type = _CONCATPARAMETER
_LAYERPARAMETER.fields_by_name[
"contrastive_loss_param"
].message_type = _CONTRASTIVELOSSPARAMETER
_LAYERPARAMETER.fields_by_name["convolution_param"].message_type = _CONVOLUTIONPARAMETER
_LAYERPARAMETER.fields_by_name["crop_param"].message_type = _CROPPARAMETER
_LAYERPARAMETER.fields_by_name["data_param"].message_type = _DATAPARAMETER
_LAYERPARAMETER.fields_by_name["dropout_param"].message_type = _DROPOUTPARAMETER
_LAYERPARAMETER.fields_by_name["dummy_data_param"].message_type = _DUMMYDATAPARAMETER
_LAYERPARAMETER.fields_by_name["eltwise_param"].message_type = _ELTWISEPARAMETER
_LAYERPARAMETER.fields_by_name["elu_param"].message_type = _ELUPARAMETER
_LAYERPARAMETER.fields_by_name["embed_param"].message_type = _EMBEDPARAMETER
_LAYERPARAMETER.fields_by_name["exp_param"].message_type = _EXPPARAMETER
_LAYERPARAMETER.fields_by_name["flatten_param"].message_type = _FLATTENPARAMETER
_LAYERPARAMETER.fields_by_name["hdf5_data_param"].message_type = _HDF5DATAPARAMETER
_LAYERPARAMETER.fields_by_name["hdf5_output_param"].message_type = _HDF5OUTPUTPARAMETER
_LAYERPARAMETER.fields_by_name["hinge_loss_param"].message_type = _HINGELOSSPARAMETER
_LAYERPARAMETER.fields_by_name["image_data_param"].message_type = _IMAGEDATAPARAMETER
_LAYERPARAMETER.fields_by_name[
"infogain_loss_param"
].message_type = _INFOGAINLOSSPARAMETER
_LAYERPARAMETER.fields_by_name[
"inner_product_param"
].message_type = _INNERPRODUCTPARAMETER
_LAYERPARAMETER.fields_by_name["input_param"].message_type = _INPUTPARAMETER
_LAYERPARAMETER.fields_by_name["log_param"].message_type = _LOGPARAMETER
_LAYERPARAMETER.fields_by_name["lrn_param"].message_type = _LRNPARAMETER
_LAYERPARAMETER.fields_by_name["memory_data_param"].message_type = _MEMORYDATAPARAMETER
_LAYERPARAMETER.fields_by_name["mvn_param"].message_type = _MVNPARAMETER
_LAYERPARAMETER.fields_by_name["parameter_param"].message_type = _PARAMETERPARAMETER
_LAYERPARAMETER.fields_by_name["pooling_param"].message_type = _POOLINGPARAMETER
_LAYERPARAMETER.fields_by_name["power_param"].message_type = _POWERPARAMETER
_LAYERPARAMETER.fields_by_name["prelu_param"].message_type = _PRELUPARAMETER
_LAYERPARAMETER.fields_by_name["python_param"].message_type = _PYTHONPARAMETER
_LAYERPARAMETER.fields_by_name["recurrent_param"].message_type = _RECURRENTPARAMETER
_LAYERPARAMETER.fields_by_name["reduction_param"].message_type = _REDUCTIONPARAMETER
_LAYERPARAMETER.fields_by_name["relu_param"].message_type = _RELUPARAMETER
_LAYERPARAMETER.fields_by_name["reshape_param"].message_type = _RESHAPEPARAMETER
_LAYERPARAMETER.fields_by_name["scale_param"].message_type = _SCALEPARAMETER
_LAYERPARAMETER.fields_by_name["sigmoid_param"].message_type = _SIGMOIDPARAMETER
_LAYERPARAMETER.fields_by_name["softmax_param"].message_type = _SOFTMAXPARAMETER
_LAYERPARAMETER.fields_by_name["spp_param"].message_type = _SPPPARAMETER
_LAYERPARAMETER.fields_by_name["slice_param"].message_type = _SLICEPARAMETER
_LAYERPARAMETER.fields_by_name["tanh_param"].message_type = _TANHPARAMETER
_LAYERPARAMETER.fields_by_name["threshold_param"].message_type = _THRESHOLDPARAMETER
_LAYERPARAMETER.fields_by_name["tile_param"].message_type = _TILEPARAMETER
_LAYERPARAMETER.fields_by_name["window_data_param"].message_type = _WINDOWDATAPARAMETER
_LOSSPARAMETER.fields_by_name[
"normalization"
].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE
_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER
_BIASPARAMETER.fields_by_name["filler"].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name["weight_filler"].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name["bias_filler"].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name["engine"].enum_type = _CONVOLUTIONPARAMETER_ENGINE
_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER
_DATAPARAMETER.fields_by_name["backend"].enum_type = _DATAPARAMETER_DB
_DATAPARAMETER_DB.containing_type = _DATAPARAMETER
_DUMMYDATAPARAMETER.fields_by_name["data_filler"].message_type = _FILLERPARAMETER
_DUMMYDATAPARAMETER.fields_by_name["shape"].message_type = _BLOBSHAPE
_ELTWISEPARAMETER.fields_by_name["operation"].enum_type = _ELTWISEPARAMETER_ELTWISEOP
_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER
_EMBEDPARAMETER.fields_by_name["weight_filler"].message_type = _FILLERPARAMETER
_EMBEDPARAMETER.fields_by_name["bias_filler"].message_type = _FILLERPARAMETER
_HINGELOSSPARAMETER.fields_by_name["norm"].enum_type = _HINGELOSSPARAMETER_NORM
_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name["weight_filler"].message_type = _FILLERPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name["bias_filler"].message_type = _FILLERPARAMETER
_INPUTPARAMETER.fields_by_name["shape"].message_type = _BLOBSHAPE
_LRNPARAMETER.fields_by_name["norm_region"].enum_type = _LRNPARAMETER_NORMREGION
_LRNPARAMETER.fields_by_name["engine"].enum_type = _LRNPARAMETER_ENGINE
_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER
_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER
_PARAMETERPARAMETER.fields_by_name["shape"].message_type = _BLOBSHAPE
_POOLINGPARAMETER.fields_by_name["pool"].enum_type = _POOLINGPARAMETER_POOLMETHOD
_POOLINGPARAMETER.fields_by_name["engine"].enum_type = _POOLINGPARAMETER_ENGINE
_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER
_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER
_RECURRENTPARAMETER.fields_by_name["weight_filler"].message_type = _FILLERPARAMETER
_RECURRENTPARAMETER.fields_by_name["bias_filler"].message_type = _FILLERPARAMETER
_REDUCTIONPARAMETER.fields_by_name[
"operation"
].enum_type = _REDUCTIONPARAMETER_REDUCTIONOP
_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER
_RELUPARAMETER.fields_by_name["engine"].enum_type = _RELUPARAMETER_ENGINE
_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER
_RESHAPEPARAMETER.fields_by_name["shape"].message_type = _BLOBSHAPE
_SCALEPARAMETER.fields_by_name["filler"].message_type = _FILLERPARAMETER
_SCALEPARAMETER.fields_by_name["bias_filler"].message_type = _FILLERPARAMETER
_SIGMOIDPARAMETER.fields_by_name["engine"].enum_type = _SIGMOIDPARAMETER_ENGINE
_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER
_SOFTMAXPARAMETER.fields_by_name["engine"].enum_type = _SOFTMAXPARAMETER_ENGINE
_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER
_TANHPARAMETER.fields_by_name["engine"].enum_type = _TANHPARAMETER_ENGINE
_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER
_SPPPARAMETER.fields_by_name["pool"].enum_type = _SPPPARAMETER_POOLMETHOD
_SPPPARAMETER.fields_by_name["engine"].enum_type = _SPPPARAMETER_ENGINE
_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER
_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER
_V1LAYERPARAMETER.fields_by_name["include"].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name["exclude"].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name["type"].enum_type = _V1LAYERPARAMETER_LAYERTYPE
_V1LAYERPARAMETER.fields_by_name["blobs"].message_type = _BLOBPROTO
_V1LAYERPARAMETER.fields_by_name[
"blob_share_mode"
].enum_type = _V1LAYERPARAMETER_DIMCHECKMODE
_V1LAYERPARAMETER.fields_by_name["accuracy_param"].message_type = _ACCURACYPARAMETER
_V1LAYERPARAMETER.fields_by_name["argmax_param"].message_type = _ARGMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name["concat_param"].message_type = _CONCATPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"contrastive_loss_param"
].message_type = _CONTRASTIVELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"convolution_param"
].message_type = _CONVOLUTIONPARAMETER
_V1LAYERPARAMETER.fields_by_name["data_param"].message_type = _DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name["dropout_param"].message_type = _DROPOUTPARAMETER
_V1LAYERPARAMETER.fields_by_name["dummy_data_param"].message_type = _DUMMYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name["eltwise_param"].message_type = _ELTWISEPARAMETER
_V1LAYERPARAMETER.fields_by_name["exp_param"].message_type = _EXPPARAMETER
_V1LAYERPARAMETER.fields_by_name["hdf5_data_param"].message_type = _HDF5DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"hdf5_output_param"
].message_type = _HDF5OUTPUTPARAMETER
_V1LAYERPARAMETER.fields_by_name["hinge_loss_param"].message_type = _HINGELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name["image_data_param"].message_type = _IMAGEDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"infogain_loss_param"
].message_type = _INFOGAINLOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"inner_product_param"
].message_type = _INNERPRODUCTPARAMETER
_V1LAYERPARAMETER.fields_by_name["lrn_param"].message_type = _LRNPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"memory_data_param"
].message_type = _MEMORYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name["mvn_param"].message_type = _MVNPARAMETER
_V1LAYERPARAMETER.fields_by_name["pooling_param"].message_type = _POOLINGPARAMETER
_V1LAYERPARAMETER.fields_by_name["power_param"].message_type = _POWERPARAMETER
_V1LAYERPARAMETER.fields_by_name["relu_param"].message_type = _RELUPARAMETER
_V1LAYERPARAMETER.fields_by_name["sigmoid_param"].message_type = _SIGMOIDPARAMETER
_V1LAYERPARAMETER.fields_by_name["softmax_param"].message_type = _SOFTMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name["slice_param"].message_type = _SLICEPARAMETER
_V1LAYERPARAMETER.fields_by_name["tanh_param"].message_type = _TANHPARAMETER
_V1LAYERPARAMETER.fields_by_name["threshold_param"].message_type = _THRESHOLDPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"window_data_param"
].message_type = _WINDOWDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name[
"transform_param"
].message_type = _TRANSFORMATIONPARAMETER
_V1LAYERPARAMETER.fields_by_name["loss_param"].message_type = _LOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name["layer"].message_type = _V0LAYERPARAMETER
_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER
_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER
_V0LAYERPARAMETER.fields_by_name["weight_filler"].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name["bias_filler"].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name["pool"].enum_type = _V0LAYERPARAMETER_POOLMETHOD
_V0LAYERPARAMETER.fields_by_name["blobs"].message_type = _BLOBPROTO
_V0LAYERPARAMETER.fields_by_name[
"hdf5_output_param"
].message_type = _HDF5OUTPUTPARAMETER
_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER
_PRELUPARAMETER.fields_by_name["filler"].message_type = _FILLERPARAMETER
DESCRIPTOR.message_types_by_name["BlobShape"] = _BLOBSHAPE
DESCRIPTOR.message_types_by_name["BlobProto"] = _BLOBPROTO
DESCRIPTOR.message_types_by_name["BlobProtoVector"] = _BLOBPROTOVECTOR
DESCRIPTOR.message_types_by_name["Datum"] = _DATUM
DESCRIPTOR.message_types_by_name["FillerParameter"] = _FILLERPARAMETER
DESCRIPTOR.message_types_by_name["NetParameter"] = _NETPARAMETER
DESCRIPTOR.message_types_by_name["SolverParameter"] = _SOLVERPARAMETER
DESCRIPTOR.message_types_by_name["SolverState"] = _SOLVERSTATE
DESCRIPTOR.message_types_by_name["NetState"] = _NETSTATE
DESCRIPTOR.message_types_by_name["NetStateRule"] = _NETSTATERULE
DESCRIPTOR.message_types_by_name["ParamSpec"] = _PARAMSPEC
DESCRIPTOR.message_types_by_name["LayerParameter"] = _LAYERPARAMETER
DESCRIPTOR.message_types_by_name["TransformationParameter"] = _TRANSFORMATIONPARAMETER
DESCRIPTOR.message_types_by_name["LossParameter"] = _LOSSPARAMETER
DESCRIPTOR.message_types_by_name["AccuracyParameter"] = _ACCURACYPARAMETER
DESCRIPTOR.message_types_by_name["ArgMaxParameter"] = _ARGMAXPARAMETER
DESCRIPTOR.message_types_by_name["ConcatParameter"] = _CONCATPARAMETER
DESCRIPTOR.message_types_by_name["BatchNormParameter"] = _BATCHNORMPARAMETER
DESCRIPTOR.message_types_by_name["BiasParameter"] = _BIASPARAMETER
DESCRIPTOR.message_types_by_name["ContrastiveLossParameter"] = _CONTRASTIVELOSSPARAMETER
DESCRIPTOR.message_types_by_name["ConvolutionParameter"] = _CONVOLUTIONPARAMETER
DESCRIPTOR.message_types_by_name["CropParameter"] = _CROPPARAMETER
DESCRIPTOR.message_types_by_name["DataParameter"] = _DATAPARAMETER
DESCRIPTOR.message_types_by_name["DropoutParameter"] = _DROPOUTPARAMETER
DESCRIPTOR.message_types_by_name["DummyDataParameter"] = _DUMMYDATAPARAMETER
DESCRIPTOR.message_types_by_name["EltwiseParameter"] = _ELTWISEPARAMETER
DESCRIPTOR.message_types_by_name["ELUParameter"] = _ELUPARAMETER
DESCRIPTOR.message_types_by_name["EmbedParameter"] = _EMBEDPARAMETER
DESCRIPTOR.message_types_by_name["ExpParameter"] = _EXPPARAMETER
DESCRIPTOR.message_types_by_name["FlattenParameter"] = _FLATTENPARAMETER
DESCRIPTOR.message_types_by_name["HDF5DataParameter"] = _HDF5DATAPARAMETER
DESCRIPTOR.message_types_by_name["HDF5OutputParameter"] = _HDF5OUTPUTPARAMETER
DESCRIPTOR.message_types_by_name["HingeLossParameter"] = _HINGELOSSPARAMETER
DESCRIPTOR.message_types_by_name["ImageDataParameter"] = _IMAGEDATAPARAMETER
DESCRIPTOR.message_types_by_name["InfogainLossParameter"] = _INFOGAINLOSSPARAMETER
DESCRIPTOR.message_types_by_name["InnerProductParameter"] = _INNERPRODUCTPARAMETER
DESCRIPTOR.message_types_by_name["InputParameter"] = _INPUTPARAMETER
DESCRIPTOR.message_types_by_name["LogParameter"] = _LOGPARAMETER
DESCRIPTOR.message_types_by_name["LRNParameter"] = _LRNPARAMETER
DESCRIPTOR.message_types_by_name["MemoryDataParameter"] = _MEMORYDATAPARAMETER
DESCRIPTOR.message_types_by_name["MVNParameter"] = _MVNPARAMETER
DESCRIPTOR.message_types_by_name["ParameterParameter"] = _PARAMETERPARAMETER
DESCRIPTOR.message_types_by_name["PoolingParameter"] = _POOLINGPARAMETER
DESCRIPTOR.message_types_by_name["PowerParameter"] = _POWERPARAMETER
DESCRIPTOR.message_types_by_name["PythonParameter"] = _PYTHONPARAMETER
DESCRIPTOR.message_types_by_name["RecurrentParameter"] = _RECURRENTPARAMETER
DESCRIPTOR.message_types_by_name["ReductionParameter"] = _REDUCTIONPARAMETER
DESCRIPTOR.message_types_by_name["ReLUParameter"] = _RELUPARAMETER
DESCRIPTOR.message_types_by_name["ReshapeParameter"] = _RESHAPEPARAMETER
DESCRIPTOR.message_types_by_name["ScaleParameter"] = _SCALEPARAMETER
DESCRIPTOR.message_types_by_name["SigmoidParameter"] = _SIGMOIDPARAMETER
DESCRIPTOR.message_types_by_name["SliceParameter"] = _SLICEPARAMETER
DESCRIPTOR.message_types_by_name["SoftmaxParameter"] = _SOFTMAXPARAMETER
DESCRIPTOR.message_types_by_name["TanHParameter"] = _TANHPARAMETER
DESCRIPTOR.message_types_by_name["TileParameter"] = _TILEPARAMETER
DESCRIPTOR.message_types_by_name["ThresholdParameter"] = _THRESHOLDPARAMETER
DESCRIPTOR.message_types_by_name["WindowDataParameter"] = _WINDOWDATAPARAMETER
DESCRIPTOR.message_types_by_name["SPPParameter"] = _SPPPARAMETER
DESCRIPTOR.message_types_by_name["V1LayerParameter"] = _V1LAYERPARAMETER
DESCRIPTOR.message_types_by_name["V0LayerParameter"] = _V0LAYERPARAMETER
DESCRIPTOR.message_types_by_name["PReLUParameter"] = _PRELUPARAMETER
DESCRIPTOR.enum_types_by_name["Phase"] = _PHASE
BlobShape = _reflection.GeneratedProtocolMessageType(
"BlobShape",
(_message.Message,),
dict(
DESCRIPTOR=_BLOBSHAPE,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.BlobShape)
),
)
_sym_db.RegisterMessage(BlobShape)
BlobProto = _reflection.GeneratedProtocolMessageType(
"BlobProto",
(_message.Message,),
dict(
DESCRIPTOR=_BLOBPROTO,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.BlobProto)
),
)
_sym_db.RegisterMessage(BlobProto)
BlobProtoVector = _reflection.GeneratedProtocolMessageType(
"BlobProtoVector",
(_message.Message,),
dict(
DESCRIPTOR=_BLOBPROTOVECTOR,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.BlobProtoVector)
),
)
_sym_db.RegisterMessage(BlobProtoVector)
Datum = _reflection.GeneratedProtocolMessageType(
"Datum",
(_message.Message,),
dict(
DESCRIPTOR=_DATUM,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.Datum)
),
)
_sym_db.RegisterMessage(Datum)
FillerParameter = _reflection.GeneratedProtocolMessageType(
"FillerParameter",
(_message.Message,),
dict(
DESCRIPTOR=_FILLERPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.FillerParameter)
),
)
_sym_db.RegisterMessage(FillerParameter)
NetParameter = _reflection.GeneratedProtocolMessageType(
"NetParameter",
(_message.Message,),
dict(
DESCRIPTOR=_NETPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.NetParameter)
),
)
_sym_db.RegisterMessage(NetParameter)
SolverParameter = _reflection.GeneratedProtocolMessageType(
"SolverParameter",
(_message.Message,),
dict(
DESCRIPTOR=_SOLVERPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.SolverParameter)
),
)
_sym_db.RegisterMessage(SolverParameter)
SolverState = _reflection.GeneratedProtocolMessageType(
"SolverState",
(_message.Message,),
dict(
DESCRIPTOR=_SOLVERSTATE,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.SolverState)
),
)
_sym_db.RegisterMessage(SolverState)
NetState = _reflection.GeneratedProtocolMessageType(
"NetState",
(_message.Message,),
dict(
DESCRIPTOR=_NETSTATE,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.NetState)
),
)
_sym_db.RegisterMessage(NetState)
NetStateRule = _reflection.GeneratedProtocolMessageType(
"NetStateRule",
(_message.Message,),
dict(
DESCRIPTOR=_NETSTATERULE,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.NetStateRule)
),
)
_sym_db.RegisterMessage(NetStateRule)
ParamSpec = _reflection.GeneratedProtocolMessageType(
"ParamSpec",
(_message.Message,),
dict(
DESCRIPTOR=_PARAMSPEC,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ParamSpec)
),
)
_sym_db.RegisterMessage(ParamSpec)
LayerParameter = _reflection.GeneratedProtocolMessageType(
"LayerParameter",
(_message.Message,),
dict(
DESCRIPTOR=_LAYERPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.LayerParameter)
),
)
_sym_db.RegisterMessage(LayerParameter)
TransformationParameter = _reflection.GeneratedProtocolMessageType(
"TransformationParameter",
(_message.Message,),
dict(
DESCRIPTOR=_TRANSFORMATIONPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.TransformationParameter)
),
)
_sym_db.RegisterMessage(TransformationParameter)
LossParameter = _reflection.GeneratedProtocolMessageType(
"LossParameter",
(_message.Message,),
dict(
DESCRIPTOR=_LOSSPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.LossParameter)
),
)
_sym_db.RegisterMessage(LossParameter)
AccuracyParameter = _reflection.GeneratedProtocolMessageType(
"AccuracyParameter",
(_message.Message,),
dict(
DESCRIPTOR=_ACCURACYPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.AccuracyParameter)
),
)
_sym_db.RegisterMessage(AccuracyParameter)
ArgMaxParameter = _reflection.GeneratedProtocolMessageType(
"ArgMaxParameter",
(_message.Message,),
dict(
DESCRIPTOR=_ARGMAXPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ArgMaxParameter)
),
)
_sym_db.RegisterMessage(ArgMaxParameter)
ConcatParameter = _reflection.GeneratedProtocolMessageType(
"ConcatParameter",
(_message.Message,),
dict(
DESCRIPTOR=_CONCATPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ConcatParameter)
),
)
_sym_db.RegisterMessage(ConcatParameter)
BatchNormParameter = _reflection.GeneratedProtocolMessageType(
"BatchNormParameter",
(_message.Message,),
dict(
DESCRIPTOR=_BATCHNORMPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.BatchNormParameter)
),
)
_sym_db.RegisterMessage(BatchNormParameter)
BiasParameter = _reflection.GeneratedProtocolMessageType(
"BiasParameter",
(_message.Message,),
dict(
DESCRIPTOR=_BIASPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.BiasParameter)
),
)
_sym_db.RegisterMessage(BiasParameter)
ContrastiveLossParameter = _reflection.GeneratedProtocolMessageType(
"ContrastiveLossParameter",
(_message.Message,),
dict(
DESCRIPTOR=_CONTRASTIVELOSSPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ContrastiveLossParameter)
),
)
_sym_db.RegisterMessage(ContrastiveLossParameter)
ConvolutionParameter = _reflection.GeneratedProtocolMessageType(
"ConvolutionParameter",
(_message.Message,),
dict(
DESCRIPTOR=_CONVOLUTIONPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ConvolutionParameter)
),
)
_sym_db.RegisterMessage(ConvolutionParameter)
CropParameter = _reflection.GeneratedProtocolMessageType(
"CropParameter",
(_message.Message,),
dict(
DESCRIPTOR=_CROPPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.CropParameter)
),
)
_sym_db.RegisterMessage(CropParameter)
DataParameter = _reflection.GeneratedProtocolMessageType(
"DataParameter",
(_message.Message,),
dict(
DESCRIPTOR=_DATAPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.DataParameter)
),
)
_sym_db.RegisterMessage(DataParameter)
DropoutParameter = _reflection.GeneratedProtocolMessageType(
"DropoutParameter",
(_message.Message,),
dict(
DESCRIPTOR=_DROPOUTPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.DropoutParameter)
),
)
_sym_db.RegisterMessage(DropoutParameter)
DummyDataParameter = _reflection.GeneratedProtocolMessageType(
"DummyDataParameter",
(_message.Message,),
dict(
DESCRIPTOR=_DUMMYDATAPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.DummyDataParameter)
),
)
_sym_db.RegisterMessage(DummyDataParameter)
EltwiseParameter = _reflection.GeneratedProtocolMessageType(
"EltwiseParameter",
(_message.Message,),
dict(
DESCRIPTOR=_ELTWISEPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.EltwiseParameter)
),
)
_sym_db.RegisterMessage(EltwiseParameter)
ELUParameter = _reflection.GeneratedProtocolMessageType(
"ELUParameter",
(_message.Message,),
dict(
DESCRIPTOR=_ELUPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ELUParameter)
),
)
_sym_db.RegisterMessage(ELUParameter)
EmbedParameter = _reflection.GeneratedProtocolMessageType(
"EmbedParameter",
(_message.Message,),
dict(
DESCRIPTOR=_EMBEDPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.EmbedParameter)
),
)
_sym_db.RegisterMessage(EmbedParameter)
ExpParameter = _reflection.GeneratedProtocolMessageType(
"ExpParameter",
(_message.Message,),
dict(
DESCRIPTOR=_EXPPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ExpParameter)
),
)
_sym_db.RegisterMessage(ExpParameter)
FlattenParameter = _reflection.GeneratedProtocolMessageType(
"FlattenParameter",
(_message.Message,),
dict(
DESCRIPTOR=_FLATTENPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.FlattenParameter)
),
)
_sym_db.RegisterMessage(FlattenParameter)
HDF5DataParameter = _reflection.GeneratedProtocolMessageType(
"HDF5DataParameter",
(_message.Message,),
dict(
DESCRIPTOR=_HDF5DATAPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.HDF5DataParameter)
),
)
_sym_db.RegisterMessage(HDF5DataParameter)
HDF5OutputParameter = _reflection.GeneratedProtocolMessageType(
"HDF5OutputParameter",
(_message.Message,),
dict(
DESCRIPTOR=_HDF5OUTPUTPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.HDF5OutputParameter)
),
)
_sym_db.RegisterMessage(HDF5OutputParameter)
HingeLossParameter = _reflection.GeneratedProtocolMessageType(
"HingeLossParameter",
(_message.Message,),
dict(
DESCRIPTOR=_HINGELOSSPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.HingeLossParameter)
),
)
_sym_db.RegisterMessage(HingeLossParameter)
ImageDataParameter = _reflection.GeneratedProtocolMessageType(
"ImageDataParameter",
(_message.Message,),
dict(
DESCRIPTOR=_IMAGEDATAPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ImageDataParameter)
),
)
_sym_db.RegisterMessage(ImageDataParameter)
InfogainLossParameter = _reflection.GeneratedProtocolMessageType(
"InfogainLossParameter",
(_message.Message,),
dict(
DESCRIPTOR=_INFOGAINLOSSPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.InfogainLossParameter)
),
)
_sym_db.RegisterMessage(InfogainLossParameter)
InnerProductParameter = _reflection.GeneratedProtocolMessageType(
"InnerProductParameter",
(_message.Message,),
dict(
DESCRIPTOR=_INNERPRODUCTPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.InnerProductParameter)
),
)
_sym_db.RegisterMessage(InnerProductParameter)
InputParameter = _reflection.GeneratedProtocolMessageType(
"InputParameter",
(_message.Message,),
dict(
DESCRIPTOR=_INPUTPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.InputParameter)
),
)
_sym_db.RegisterMessage(InputParameter)
LogParameter = _reflection.GeneratedProtocolMessageType(
"LogParameter",
(_message.Message,),
dict(
DESCRIPTOR=_LOGPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.LogParameter)
),
)
_sym_db.RegisterMessage(LogParameter)
LRNParameter = _reflection.GeneratedProtocolMessageType(
"LRNParameter",
(_message.Message,),
dict(
DESCRIPTOR=_LRNPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.LRNParameter)
),
)
_sym_db.RegisterMessage(LRNParameter)
MemoryDataParameter = _reflection.GeneratedProtocolMessageType(
"MemoryDataParameter",
(_message.Message,),
dict(
DESCRIPTOR=_MEMORYDATAPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.MemoryDataParameter)
),
)
_sym_db.RegisterMessage(MemoryDataParameter)
MVNParameter = _reflection.GeneratedProtocolMessageType(
"MVNParameter",
(_message.Message,),
dict(
DESCRIPTOR=_MVNPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.MVNParameter)
),
)
_sym_db.RegisterMessage(MVNParameter)
ParameterParameter = _reflection.GeneratedProtocolMessageType(
"ParameterParameter",
(_message.Message,),
dict(
DESCRIPTOR=_PARAMETERPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ParameterParameter)
),
)
_sym_db.RegisterMessage(ParameterParameter)
PoolingParameter = _reflection.GeneratedProtocolMessageType(
"PoolingParameter",
(_message.Message,),
dict(
DESCRIPTOR=_POOLINGPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.PoolingParameter)
),
)
_sym_db.RegisterMessage(PoolingParameter)
PowerParameter = _reflection.GeneratedProtocolMessageType(
"PowerParameter",
(_message.Message,),
dict(
DESCRIPTOR=_POWERPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.PowerParameter)
),
)
_sym_db.RegisterMessage(PowerParameter)
PythonParameter = _reflection.GeneratedProtocolMessageType(
"PythonParameter",
(_message.Message,),
dict(
DESCRIPTOR=_PYTHONPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.PythonParameter)
),
)
_sym_db.RegisterMessage(PythonParameter)
RecurrentParameter = _reflection.GeneratedProtocolMessageType(
"RecurrentParameter",
(_message.Message,),
dict(
DESCRIPTOR=_RECURRENTPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.RecurrentParameter)
),
)
_sym_db.RegisterMessage(RecurrentParameter)
ReductionParameter = _reflection.GeneratedProtocolMessageType(
"ReductionParameter",
(_message.Message,),
dict(
DESCRIPTOR=_REDUCTIONPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ReductionParameter)
),
)
_sym_db.RegisterMessage(ReductionParameter)
ReLUParameter = _reflection.GeneratedProtocolMessageType(
"ReLUParameter",
(_message.Message,),
dict(
DESCRIPTOR=_RELUPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ReLUParameter)
),
)
_sym_db.RegisterMessage(ReLUParameter)
ReshapeParameter = _reflection.GeneratedProtocolMessageType(
"ReshapeParameter",
(_message.Message,),
dict(
DESCRIPTOR=_RESHAPEPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ReshapeParameter)
),
)
_sym_db.RegisterMessage(ReshapeParameter)
ScaleParameter = _reflection.GeneratedProtocolMessageType(
"ScaleParameter",
(_message.Message,),
dict(
DESCRIPTOR=_SCALEPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ScaleParameter)
),
)
_sym_db.RegisterMessage(ScaleParameter)
SigmoidParameter = _reflection.GeneratedProtocolMessageType(
"SigmoidParameter",
(_message.Message,),
dict(
DESCRIPTOR=_SIGMOIDPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.SigmoidParameter)
),
)
_sym_db.RegisterMessage(SigmoidParameter)
SliceParameter = _reflection.GeneratedProtocolMessageType(
"SliceParameter",
(_message.Message,),
dict(
DESCRIPTOR=_SLICEPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.SliceParameter)
),
)
_sym_db.RegisterMessage(SliceParameter)
SoftmaxParameter = _reflection.GeneratedProtocolMessageType(
"SoftmaxParameter",
(_message.Message,),
dict(
DESCRIPTOR=_SOFTMAXPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.SoftmaxParameter)
),
)
_sym_db.RegisterMessage(SoftmaxParameter)
TanHParameter = _reflection.GeneratedProtocolMessageType(
"TanHParameter",
(_message.Message,),
dict(
DESCRIPTOR=_TANHPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.TanHParameter)
),
)
_sym_db.RegisterMessage(TanHParameter)
TileParameter = _reflection.GeneratedProtocolMessageType(
"TileParameter",
(_message.Message,),
dict(
DESCRIPTOR=_TILEPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.TileParameter)
),
)
_sym_db.RegisterMessage(TileParameter)
ThresholdParameter = _reflection.GeneratedProtocolMessageType(
"ThresholdParameter",
(_message.Message,),
dict(
DESCRIPTOR=_THRESHOLDPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.ThresholdParameter)
),
)
_sym_db.RegisterMessage(ThresholdParameter)
WindowDataParameter = _reflection.GeneratedProtocolMessageType(
"WindowDataParameter",
(_message.Message,),
dict(
DESCRIPTOR=_WINDOWDATAPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.WindowDataParameter)
),
)
_sym_db.RegisterMessage(WindowDataParameter)
SPPParameter = _reflection.GeneratedProtocolMessageType(
"SPPParameter",
(_message.Message,),
dict(
DESCRIPTOR=_SPPPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.SPPParameter)
),
)
_sym_db.RegisterMessage(SPPParameter)
V1LayerParameter = _reflection.GeneratedProtocolMessageType(
"V1LayerParameter",
(_message.Message,),
dict(
DESCRIPTOR=_V1LAYERPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.V1LayerParameter)
),
)
_sym_db.RegisterMessage(V1LayerParameter)
V0LayerParameter = _reflection.GeneratedProtocolMessageType(
"V0LayerParameter",
(_message.Message,),
dict(
DESCRIPTOR=_V0LAYERPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.V0LayerParameter)
),
)
_sym_db.RegisterMessage(V0LayerParameter)
PReLUParameter = _reflection.GeneratedProtocolMessageType(
"PReLUParameter",
(_message.Message,),
dict(
DESCRIPTOR=_PRELUPARAMETER,
__module__="caffe_pb2"
# @@protoc_insertion_point(class_scope:caffe.PReLUParameter)
),
)
_sym_db.RegisterMessage(PReLUParameter)
_BLOBSHAPE.fields_by_name["dim"].has_options = True
_BLOBSHAPE.fields_by_name["dim"]._options = _descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
)
_BLOBPROTO.fields_by_name["data"].has_options = True
_BLOBPROTO.fields_by_name["data"]._options = _descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
)
_BLOBPROTO.fields_by_name["diff"].has_options = True
_BLOBPROTO.fields_by_name["diff"]._options = _descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
)
_BLOBPROTO.fields_by_name["double_data"].has_options = True
_BLOBPROTO.fields_by_name["double_data"]._options = _descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
)
_BLOBPROTO.fields_by_name["double_diff"].has_options = True
_BLOBPROTO.fields_by_name["double_diff"]._options = _descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b("\020\001")
)
# @@protoc_insertion_point(module_scope)
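# A minimal usage sketch, assuming only the standard protobuf text_format API;
# "deploy.prototxt" is a hypothetical path used purely for illustration. The
# generated classes registered above behave like any other protobuf messages.
if __name__ == "__main__":
    from google.protobuf import text_format
    # Parse a text-format network description into a NetParameter message.
    net = NetParameter()
    with open("deploy.prototxt", "r") as prototxt:
        text_format.Merge(prototxt.read(), net)
    print(net.name, len(net.layer))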
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/export/caffe/caffe_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Core dataset module."""
from nvidia_tao_tf1.core.dataloader.dataset import (
DefaultOverSamplingStrategy,
OverSamplingConfig,
OverSamplingStrategy,
)
__all__ = (
"DefaultOverSamplingStrategy",
"OverSamplingConfig",
"OverSamplingStrategy",
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Core dataset objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import collections
import logging
from nvidia_tao_tf1.core.coreobject import (
AbstractTAOObject,
TAOObject,
save_args
)
logger = logging.getLogger(__name__)
# TODO(@williamz): Consider moving all this into the `DefaultOverSamplingStrategy` constructor.
class OverSamplingConfig(TAOObject):
"""Config object meant to accompany DefaultOverSamplingConfig.
Configure oversampling for the data loader based on label classes ('classifier').
There are two modes:
OverSamplingConfig.LABEL_OCCURENCE: based on whether frame has or hasn't a label. If
frame has label, then multiply based on the count given in 'class_id_base_counts.'
OverSamplingConfig.INSTANCE_COUNT: scales repetition based on the number of instances of
given label. Say there are 3 occurences of label A in the frame, and the base count
for A is 6. Then the frame is repeated 6 / 3 = 2 times.
"""
INSTANCE_COUNT = 0
LABEL_OCCURENCE = 1
@save_args
def __init__(self, class_to_id_map, class_id_base_counts, mode):
"""Constructor.
Note: the class names in class_to_id_map must be stripped of whitespace and
lowercased, i.e. class_name.strip().lower().
Args:
class_to_id_map (dict[str, int]): Mapping from classifier name to class id number.
class_id_base_counts (dict[int, float]): For each class id, the base count (see above).
mode (int): OverSamplingConfig.INSTANCE_COUNT or OverSamplingConfig.LABEL_OCCURENCE.
"""
if set(class_to_id_map.values()).difference(set(class_id_base_counts.keys())):
raise ValueError(
"Not all values in class_to_id_map have base counts, missing: {}".format(
set(class_to_id_map.values()).difference(
set(class_id_base_counts.keys())
)
)
)
self.class_to_id_map = class_to_id_map
self.class_id_base_counts = class_id_base_counts
self.mode = mode
for class_name in class_to_id_map:
if class_name.strip().lower() != class_name:
raise RuntimeError(
"class_to_id_map must have strip().lower()'ed' class names."
)
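# Example (sketch, with hypothetical class names/ids): a config that repeats
# frames containing rare classes. In INSTANCE_COUNT mode, a frame holding 3
# 'rider' instances with a base count of 6 is repeated 6 / 3 = 2 times.
#
#   config = OverSamplingConfig(
#       class_to_id_map={"car": 0, "rider": 1},
#       class_id_base_counts={0: 1.0, 1: 6.0},
#       mode=OverSamplingConfig.INSTANCE_COUNT,
#   )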
# TODO(@williamz): This could technically be "abused" to implement filtering of frame IDs.
# Consider renaming if this is useful in practice.
class OverSamplingStrategy(AbstractTAOObject):
"""Interface for oversampling frames."""
@abstractmethod
def oversample(self, frame_groups, count_lookup):
"""Determines which frames to oversample.
Note: Oversampling could lead to some frames being repeated many times, which can have an
effect on random shuffling.
Args:
frame_groups (list): List of list of tuples. Outer list is over frame groups,
inner list contains a tuple of (frame_id(int), unused).
count_lookup (dict): dict(frame_id -> dict(class_name -> dict(attribute_sets -> cnt)))
class_count[<frame id>][<classifier>][<attribute set>] -> class count for specific
attribute set in that frame.
class_count[<frame id>][<classifier>]["COUNT"] -> count of all instances of
<classifier> in that frame.
Returns:
repeated_groups (list): Follows the same structure as `frame_groups`. It should
contain the frames that are to be repeated.
"""
raise NotImplementedError(
"This method is not implemented in the base class."
)
class DefaultOverSamplingStrategy(OverSamplingStrategy):
"""Default strategy for oversampling."""
def __init__(self, oversampling_config):
"""Constructor.
Args:
oversampling_config (OverSamplingConfig).
"""
self._oversampling_config = oversampling_config
def oversample(self, frame_groups, count_lookup):
"""Determines which frames to oversample.
Args:
frame_groups (list): List of list of tuples. Outer list is over frame groups,
inner list contains a tuple of (frame id(int), unlabeled(bool)).
count_lookup (dict): dict(frame_id -> dict(class_name -> dict(attribute_sets -> cnt)))
class_count[<frame id>][<classifier>][<attribute set>] -> class count for specific
attribute set in that frame.
class_count[<frame id>][<classifier>]["COUNT"] -> count of all instances of
<classifier> in that frame.
Returns:
repeated_groups (list): Follows the same structure as `frame_groups`. It should
contain the frames that are to be repeated.
"""
repeated_groups = []
for frame_group in frame_groups:
class_counts = collections.Counter()
class_to_id_map = self._oversampling_config.class_to_id_map
for frame_id, _ in frame_group:
if self._oversampling_config.mode == OverSamplingConfig.INSTANCE_COUNT:
classifier_map = count_lookup[frame_id]
for classifier in classifier_map.keys():
class_counts[class_to_id_map[classifier]] += float(
classifier_map[classifier]["COUNT"]
)
elif (
self._oversampling_config.mode == OverSamplingConfig.LABEL_OCCURENCE
):
for classifier in count_lookup[frame_id]:
if classifier in class_to_id_map:
class_counts[class_to_id_map[classifier]] = 1
else:
raise ValueError(
"Unknown oversampling mode: {}".format(
self._oversampling_config.mode
)
)
repeats = 1
for class_id, count in class_counts.items():
repeats += (
self._oversampling_config.class_id_base_counts[class_id] / count
)
repeats = int(repeats + 0.5)
for _ in range(repeats):
repeated_groups.append(frame_group)
return repeated_groups
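# Worked example (sketch) of the repeat computation above: with base counts
# {A: 6.0, B: 2.0} and a frame group containing 3 instances of A and 1 of B,
# repeats = 1 + 6.0 / 3 + 2.0 / 1 = 5.0, then int(5.0 + 0.5) = 5, so the
# group is appended to `repeated_groups` 5 times.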
class _DerivedField(object):
"""A synthetic field, whose value is computed based on other fields."""
def __init__(self, value_type, shape=None):
self.select = ""
self.value_type = value_type
self.frame_field = True
self.label_type = None
self.frame_id_key = False
self.label_type_field = False
self.select = "0"
self.shape = shape
self.prune = False
def __repr__(self):
return "DerivedField object: {}".format(self.__dict__)
def create_derived_field(value_type, shape):
"""Creates a field whose value is computed at read-time based on other fields."""
return _DerivedField(value_type, shape)
class FeatureProcessor(AbstractTAOObject):
"""Class that filters and maps feature rows."""
@abstractmethod
def add_fields(self, example):
"""
Add new fields to the example data structure (labels).
Example:
example.labels['BOX']['testfield_int'] = create_derived_field(tf.int32, shape=None)
Args:
example (namedtuple): data structure that the loader returns.
"""
raise NotImplementedError()
@abstractmethod
def filter(self, example_col_idx, dtype, row):
"""
Filter label rows.
Args:
example_col_idx (namedtuple): example data structure, where fields are integers
that correspond to the index of the value in 'row'
dtype (str): label type, such as 'BOX' or 'POLYGON'.
row (list): flat list of values from the database for one label. Use example_col_idx
to find which element corresponds to which field in the 'example'.
Return:
True or False, depending on whether the row should be kept.
"""
raise NotImplementedError()
@abstractmethod
def map(self, example_col_idx, dtype, row):
"""
Modify or inject values into the feature row.
Args:
example_col_idx (namedtuple): example data structure, where fields are integers
that correspond to the index of the value in 'row'
dtype (str): label type, such as 'BOX' or 'POLYGON'.
row (list): flat list of values from the database for one label. Use example_col_idx
to find which element corresponds to which field in the 'example'.
Return:
modified 'row'.
"""
raise NotImplementedError()
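# Minimal sketch of a concrete FeatureProcessor (hypothetical subclass for
# illustration only): drop every 'BOX' label row, add no derived fields, and
# pass other rows through unchanged.
#
#   class DropBoxLabels(FeatureProcessor):
#       def add_fields(self, example):
#           pass  # no derived fields needed
#
#       def filter(self, example_col_idx, dtype, row):
#           return dtype != 'BOX'
#
#       def map(self, example_col_idx, dtype, row):
#           return row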
class FrameProcessor(AbstractTAOObject):
"""
Extension object to modify / add fields to frames.
Note: frames cannot be filtered; filtering must be done when resolving the frame ids to
load (see _get_all_frames() etc.). This is because otherwise there is no efficient way
to compute the dataset size.
"""
@abstractmethod
def add_fields(self, example):
"""
Add new fields to the example data structure (labels).
Example:
example.labels['BOX']['testfield_int'] = create_derived_field(tf.int32, shape=None)
Args:
example (namedtuple): data structure that the loader returns.
"""
raise NotImplementedError()
@abstractmethod
def map(self, example_col_idx, frame):
"""
Modify or inject values into the frame.
Args:
example_col_idx (namedtuple): example data structure, where fields are integers
that correspond to the index of the value in 'row'
frame (list): flat list of values from the database for a frame. Use example_col_idx
to find which element corresponds to which field in the 'example'.
Return:
modified 'frame'.
"""
raise NotImplementedError()
def _default_value(shape):
if shape is None:
return None
if len(shape) == 1:
return []
if len(shape) == 2:
return [[]]
raise ValueError(
"Currently only support 1 or 2 dimensional shapes, given: {}".format(shape)
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/dataloader/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helper functions for data conversion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _partition, _shard
def test_single_partition():
"""Partition a list of sequences into a single partition."""
sequences = [[0, 1, 2], [3, 4]]
assert _partition(sequences, 0, 1) == [[0, 1, 2, 3, 4]]
assert _partition(sequences, 1, 1) == [[0, 1, 2, 3, 4]]
def test_two_partitions():
"""Partition a list of sequences into 2 partitions."""
sequences = [[0, 1], [2, 3, 4], [5]]
assert _partition(sequences, 2, 1) == [[2, 3, 4], [0, 1, 5]]
def test_three_partitions():
"""Partition a list of sequences into 3 partitions."""
sequences = [[0, 1], [2, 3, 4], [5], [6, 7, 8, 9]]
assert _partition(sequences, 3, 1) == [[6, 7, 8, 9], [2, 3, 4], [0, 1, 5]]
def test_partitions_divisor():
"""Partition a list of sequences into 2 partitions."""
sequences = [[0, 1], [2, 3, 4], [5]]
assert _partition(sequences, 2, 2) == [[2, 3], [0, 1]]
def test_sharding():
"""Shard a list of partitions."""
partitions = [[0, 1, 2], [3, 4]]
assert _shard(partitions, 2) == [[[0], [1, 2]], [[3], [4]]]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/dataloader/tfrecord/converter_lib_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus dataloader tfrecord module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _bytes_feature
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _convert_unicode_to_str
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _float_feature
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _int64_feature
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _partition
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _shard
from nvidia_tao_tf1.core.dataloader.tfrecord.converter_lib import _shuffle
__all__ = (
"_bytes_feature",
"_convert_unicode_to_str",
"_float_feature",
"_int64_feature",
"_partition",
"_shard",
"_shuffle",
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/dataloader/tfrecord/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for converting datasets to .tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pprint
import random
import sys
import tensorflow as tf
def _convert_unicode_to_str(item):
if sys.version_info >= (3, 0):
# Python 3 strings are unicode, need to convert to bytes.
if isinstance(item, str):
return item.encode("ascii", "ignore")
return item
if isinstance(item, unicode): # pylint: disable=undefined-variable # noqa: F821
return item.encode("ascii", "ignore")
return item
def _bytes_feature(*values):
# Convert unicode data to string for saving to TFRecords.
values = [_convert_unicode_to_str(value) for value in values]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
def _float_feature(*values):
return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _int64_feature(*values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
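# Example (sketch, hypothetical feature keys): assembling a tf.train.Example
# from the helpers above. _bytes_feature transparently converts unicode input.
#
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'frame/id': _bytes_feature(u'frame_000'),
#       'frame/width': _int64_feature(960),
#       'bbox/x': _float_feature(0.25, 0.75),
#   }))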
def _partition(sequences, num_partitions, divisor, uneven=False):
"""Partition a list of sequences to approximately equal lengths."""
num_partitions = max(num_partitions, 1) # 0 means 1 partition.
# Sort ascending by length so the longest sequence sits at the end (popped first).
sequences_by_length = sorted(sequences, key=len)
partitions = [[] for _ in range(num_partitions)]
while sequences_by_length:
longest_sequence = sequences_by_length.pop()
# Add the longest_sequence to the shortest partition.
smallest_partition = min(partitions, key=len)
smallest_partition.extend(longest_sequence)
for partition in partitions:
for _ in range(len(partition) % divisor):
partition.pop()
if num_partitions > 1 and uneven:
if len(partitions) != num_partitions:
raise RuntimeError("Check the number of partitions.")
# Flatten the first num_partitions - 1 partitions into one list.
flat_list = [item for l in partitions[0 : num_partitions - 1] for item in l]
# Use the flattened list as the 0th (training) partition and the last
# partition as the 1st (validation) partition.
partitions = [flat_list, partitions[-1]]
validation_sequence_stats = dict()
for frame in partitions[-1]:
if "sequence" in frame.keys():
sequence_name = frame["sequence"]["name"]
else:
sequence_name = frame["sequence_name"]
if sequence_name is None:
raise RuntimeError("Sequence name is None.")
if sequence_name in validation_sequence_stats.keys():
validation_sequence_stats[sequence_name] += 1
else:
validation_sequence_stats[sequence_name] = 1
pp = pprint.PrettyPrinter(indent=4)
print("%d training frames " % (len(partitions[0])))
print("%d validation frames" % (len(partitions[-1])))
print("Validation sequence stats:")
print("Sequence name: #frame")
pp.pprint(validation_sequence_stats)
return partitions
def _shard(partitions, num_shards):
"""Shard each partition."""
num_shards = max(num_shards, 1) # 0 means 1 shard.
shards = []
for partition in partitions:
result = []
shard_size = len(partition) // num_shards
for i in range(num_shards):
begin = i * shard_size
end = (i + 1) * shard_size if i + 1 < num_shards else len(partition)
result.append(partition[begin:end])
shards.append(result)
return shards
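# Example (values taken from converter_lib_test.py): partitioning then sharding.
#
#   _partition([[0, 1], [2, 3, 4], [5]], 2, 1)   # -> [[2, 3, 4], [0, 1, 5]]
#   _partition([[0, 1], [2, 3, 4], [5]], 2, 2)   # -> [[2, 3], [0, 1]] (divisor trim)
#   _shard([[0, 1, 2], [3, 4]], 2)               # -> [[[0], [1, 2]], [[3], [4]]]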
def _shuffle(partitions):
"""Shuffle each partition independently."""
for partition in partitions:
random.shuffle(partition)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/core/dataloader/tfrecord/converter_lib.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit file encoding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from eff_tao_encryption.tao_codec import encrypt_stream, decrypt_stream
def encode(input_stream, output_stream, encode_key):
"""Encode a stream using a byte object key as the encoding 'password'.
Derives the 256 bit key using PBKDF2 password stretching and encodes using
AES in CTR mode 1024 bytes at a time. Includes the raw initialization value
-- 'nonce' -- as the first item in the encoded ciphertext.
NOTE: this function does not close the input or output stream.
The stream is duck-typed, as long as the object can be read N bytes at a
time via read(N) and written to using write(BYTES_OBJECT).
Args:
input_stream (open readable binary io stream): Input stream, typically
an open file. Data read from this stream is encoded and written to
the output_stream.
output_stream (open writable binary io stream): Writable output stream
where the encoded data of the input_file is written to.
encode_key (bytes): byte text representing the encoding password.
b"yourkey"
Returns:
Nothing.
Raises:
TypeError: if the encode_key is not a byte object
"""
# Validate encode_key input and then derive the key.
if not isinstance(encode_key, bytes):
try:
# if this is str, let's try to encode it into bytes.
encode_key = str.encode(encode_key)
except Exception:
raise TypeError("encode_key must be passed as a byte object")
encrypt_stream(
input_stream, output_stream,
encode_key, encryption=True, rewind=False
)
def decode(input_stream, output_stream, encode_key):
"""Decode a stream using byte object key as the decoding 'password'.
Derives the 256 bit key using PBKDF2 password stretching and decodes
a stream encoded using AES in CTR mode by the above encode function.
Processes 1024 bytes at a time and uses the 'nonce' value included at
the beginning of the cipher text input stream.
NOTE: This function does not delete the encoded cipher text.
Args:
input_stream (open readable binary io stream): Encoded input stream,
typically an open file. Data read from this stream is decoded and
written to the output_stream.
output_stream (open writable binary io stream): Writable output stream
where the decoded data of the input_file is written to.
encode_key (bytes): byte text representing the encoding password.
b"yourkey".
Returns:
Nothing.
Raises:
TypeError: if the encode_key is not a byte object
ValueError: if a valid nonce value can't be read from the given file.
"""
# Validate encode_key input and derive the key.
if not isinstance(encode_key, bytes):
try:
# if this is str, let's try to encode it into bytes.
encode_key = str.encode(encode_key)
except Exception:
raise TypeError("encode_key must be passed as a byte object")
decrypt_stream(
input_stream, output_stream,
encode_key, encryption=True, rewind=False
)
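# Example usage (sketch): streams are duck-typed, so in-memory BytesIO objects
# work as well as open binary files. Note the seek(0) between passes, since
# neither function rewinds its streams.
#
#   import io
#   src, enc, dec = io.BytesIO(b"payload"), io.BytesIO(), io.BytesIO()
#   encode(src, enc, b"yourkey")
#   enc.seek(0)
#   decode(enc, dec, b"yourkey")
#   dec.seek(0)
#   assert dec.read() == b"payload"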
| tao_tensorflow1_backend-main | nvidia_tao_tf1/encoding/encoding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tempfile import TemporaryFile
import pytest
from nvidia_tao_tf1.encoding import encoding
test_byte_key = b"Valid-Byte-Object-Key"
other_byte_key = b"Different-Valid-Byte-Object-Key"
test_file_size = 2001
_CHUNK_SIZE = 1024 # bytes
__NONCE_SIZE = 16 # bytes -- 128 bit
# encode function error testing
@pytest.mark.parametrize("encoding_file_size, encoding_password, \
expected_result",
[(test_file_size, 123, TypeError),
(test_file_size, 123.456, TypeError),
(test_file_size, [], TypeError),
(test_file_size, True, TypeError)])
def test_encode_failures(encoding_file_size, encoding_password,
expected_result):
with pytest.raises(expected_result):
with TemporaryFile() as tmp_random_test_file:
with TemporaryFile() as tmp_encoding_target_file:
tmp_random_test_file.write(os.urandom(encoding_file_size))
tmp_random_test_file.seek(0)
encoding.encode(tmp_random_test_file, tmp_encoding_target_file,
encoding_password)
# decode function error testing
@pytest.mark.parametrize("encoding_file_size, encoding_password, \
expected_result",
[(test_file_size, 123, TypeError),
(test_file_size, 123.456, TypeError),
(test_file_size, [], TypeError),
(test_file_size, True, TypeError),
(0, test_byte_key, ValueError),
(__NONCE_SIZE-1, test_byte_key, ValueError)])
def test_decode_failures(encoding_file_size, encoding_password,
expected_result):
with pytest.raises(expected_result):
with TemporaryFile() as temp_encoded_file:
with TemporaryFile() as tmp_decoding_target_file:
temp_encoded_file.write(os.urandom(encoding_file_size))
temp_encoded_file.seek(0)
encoding.decode(temp_encoded_file, tmp_decoding_target_file,
encoding_password)
# Validation testing
@pytest.mark.parametrize("encoding_file_size, encoding_password",
# No payload, encoded file is only the nonce value
[(0, test_byte_key),
# 1 partial iteration
(_CHUNK_SIZE-1, test_byte_key),
# 2 complete iterations, no partial chunk
((_CHUNK_SIZE*2), test_byte_key),
# 3 iterations, including a partial 3rd chunk
((_CHUNK_SIZE*2)+1, test_byte_key)])
def test_validate_encode_decode_of_random_file(encoding_file_size,
encoding_password):
with TemporaryFile() as tmp_random_test_file,\
TemporaryFile() as tmp_encoding_target_file:
with TemporaryFile() as tmp_decoding_target_file,\
TemporaryFile() as bad_key_target_file:
random_payload = os.urandom(encoding_file_size)
tmp_random_test_file.write(random_payload)
tmp_random_test_file.seek(0)
encoding.encode(tmp_random_test_file, tmp_encoding_target_file,
encoding_password)
tmp_encoding_target_file.seek(0)
encoding.decode(tmp_encoding_target_file, tmp_decoding_target_file,
encoding_password)
tmp_encoding_target_file.seek(0)
tmp_decoding_target_file.seek(0)
assert tmp_decoding_target_file.read() == random_payload
if encoding_file_size > 0:
encoding.decode(tmp_encoding_target_file, bad_key_target_file,
other_byte_key)
bad_key_target_file.seek(0)
assert bad_key_target_file.read() != random_payload
| tao_tensorflow1_backend-main | nvidia_tao_tf1/encoding/test_encoding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MagNet encryption APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.encoding.encoding import decode
from nvidia_tao_tf1.encoding.encoding import encode
__all__ = ('decode', 'encode')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/encoding/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CV root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA LPRNet root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/lprnet/proto/lpr_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/lprnet/proto/lpr_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n/nvidia_tao_tf1/cv/lprnet/proto/lpr_config.proto\"\xa0\x01\n\x0cLPRNetConfig\x12\x17\n\x0fsequence_length\x18\x01 \x01(\r\x12\x14\n\x0chidden_units\x18\x02 \x01(\r\x12\x18\n\x10max_label_length\x18\x03 \x01(\r\x12\x0c\n\x04\x61rch\x18\x04 \x01(\t\x12\x0f\n\x07nlayers\x18\x05 \x01(\r\x12\x15\n\rfreeze_blocks\x18\x06 \x03(\r\x12\x11\n\tfreeze_bn\x18\x07 \x01(\x08\x62\x06proto3')
)
_LPRNETCONFIG = _descriptor.Descriptor(
name='LPRNetConfig',
full_name='LPRNetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sequence_length', full_name='LPRNetConfig.sequence_length', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hidden_units', full_name='LPRNetConfig.hidden_units', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_label_length', full_name='LPRNetConfig.max_label_length', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='LPRNetConfig.arch', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nlayers', full_name='LPRNetConfig.nlayers', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='LPRNetConfig.freeze_blocks', index=5,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='LPRNetConfig.freeze_bn', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=212,
)
DESCRIPTOR.message_types_by_name['LPRNetConfig'] = _LPRNETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
LPRNetConfig = _reflection.GeneratedProtocolMessageType('LPRNetConfig', (_message.Message,), dict(
DESCRIPTOR = _LPRNETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.lprnet.proto.lpr_config_pb2'
# @@protoc_insertion_point(class_scope:LPRNetConfig)
))
_sym_db.RegisterMessage(LPRNetConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/proto/lpr_config_pb2.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/proto/__init__.py |
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/lprnet/proto/lp_sequence_dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/lprnet/proto/lp_sequence_dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n?nvidia_tao_tf1/cv/lprnet/proto/lp_sequence_dataset_config.proto\"H\n\nDataSource\x12\x1c\n\x14label_directory_path\x18\x01 \x01(\t\x12\x1c\n\x14image_directory_path\x18\x02 \x01(\t\"\x80\x01\n\x0fLPDatasetConfig\x12!\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x0b.DataSource\x12\x1c\n\x14\x63haracters_list_file\x18\x02 \x01(\t\x12,\n\x17validation_data_sources\x18\x03 \x03(\x0b\x32\x0b.DataSourceb\x06proto3')
)
_DATASOURCE = _descriptor.Descriptor(
name='DataSource',
full_name='DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label_directory_path', full_name='DataSource.label_directory_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_directory_path', full_name='DataSource.image_directory_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=139,
)
_LPDATASETCONFIG = _descriptor.Descriptor(
name='LPDatasetConfig',
full_name='LPDatasetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_sources', full_name='LPDatasetConfig.data_sources', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='characters_list_file', full_name='LPDatasetConfig.characters_list_file', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_data_sources', full_name='LPDatasetConfig.validation_data_sources', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=142,
serialized_end=270,
)
_LPDATASETCONFIG.fields_by_name['data_sources'].message_type = _DATASOURCE
_LPDATASETCONFIG.fields_by_name['validation_data_sources'].message_type = _DATASOURCE
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
DESCRIPTOR.message_types_by_name['LPDatasetConfig'] = _LPDATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
DESCRIPTOR = _DATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.lprnet.proto.lp_sequence_dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DataSource)
))
_sym_db.RegisterMessage(DataSource)
LPDatasetConfig = _reflection.GeneratedProtocolMessageType('LPDatasetConfig', (_message.Message,), dict(
DESCRIPTOR = _LPDATASETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.lprnet.proto.lp_sequence_dataset_config_pb2'
# @@protoc_insertion_point(class_scope:LPDatasetConfig)
))
_sym_db.RegisterMessage(LPDatasetConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/proto/lp_sequence_dataset_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/lprnet/proto/augmentation_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/lprnet/proto/augmentation_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n8nvidia_tao_tf1/cv/lprnet/proto/augmentation_config.proto\"\xf2\x01\n\x12\x41ugmentationConfig\x12\x14\n\x0coutput_width\x18\x01 \x01(\x05\x12\x15\n\routput_height\x18\x02 \x01(\x05\x12\x16\n\x0eoutput_channel\x18\x03 \x01(\x05\x12\x19\n\x11max_rotate_degree\x18\x04 \x01(\x05\x12\x13\n\x0brotate_prob\x18\x06 \x01(\x02\x12\x1c\n\x14gaussian_kernel_size\x18\x07 \x03(\x05\x12\x11\n\tblur_prob\x18\x08 \x01(\x02\x12\x1a\n\x12reverse_color_prob\x18\t \x01(\x02\x12\x1a\n\x12keep_original_prob\x18\x05 \x01(\x02\x62\x06proto3')
)
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
name='AugmentationConfig',
full_name='AugmentationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output_width', full_name='AugmentationConfig.output_width', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_height', full_name='AugmentationConfig.output_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_channel', full_name='AugmentationConfig.output_channel', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_rotate_degree', full_name='AugmentationConfig.max_rotate_degree', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rotate_prob', full_name='AugmentationConfig.rotate_prob', index=4,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gaussian_kernel_size', full_name='AugmentationConfig.gaussian_kernel_size', index=5,
number=7, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='blur_prob', full_name='AugmentationConfig.blur_prob', index=6,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reverse_color_prob', full_name='AugmentationConfig.reverse_color_prob', index=7,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keep_original_prob', full_name='AugmentationConfig.keep_original_prob', index=8,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=303,
)
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG,
__module__ = 'nvidia_tao_tf1.cv.lprnet.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig)
))
_sym_db.RegisterMessage(AugmentationConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/lprnet/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.lprnet.proto import lp_sequence_dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_lp__sequence__dataset__config__pb2
from nvidia_tao_tf1.cv.lprnet.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_tf1.cv.lprnet.proto import eval_config_pb2 as nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_eval__config__pb2
from nvidia_tao_tf1.cv.lprnet.proto import lpr_config_pb2 as nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_lpr__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/lprnet/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n/nvidia_tao_tf1/cv/lprnet/proto/experiment.proto\x1a\x34nvidia_tao_tf1/cv/common/proto/training_config.proto\x1a?nvidia_tao_tf1/cv/lprnet/proto/lp_sequence_dataset_config.proto\x1a\x38nvidia_tao_tf1/cv/lprnet/proto/augmentation_config.proto\x1a\x30nvidia_tao_tf1/cv/lprnet/proto/eval_config.proto\x1a/nvidia_tao_tf1/cv/lprnet/proto/lpr_config.proto\"\xec\x01\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12(\n\x0e\x64\x61taset_config\x18\x02 \x01(\x0b\x32\x10.LPDatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x03 \x01(\x0b\x32\x13.AugmentationConfig\x12(\n\x0ftraining_config\x18\x04 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x05 \x01(\x0b\x32\x0b.EvalConfig\x12!\n\nlpr_config\x18\x06 \x01(\x0b\x32\r.LPRNetConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_lp__sequence__dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_lpr__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lpr_config', full_name='Experiment.lpr_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=328,
serialized_end=564,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_lp__sequence__dataset__config__pb2._LPDATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['lpr_config'].message_type = nvidia__tao__tf1_dot_cv_dot_lprnet_dot_proto_dot_lpr__config__pb2._LPRNETCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.lprnet.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/lprnet/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/lprnet/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n0nvidia_tao_tf1/cv/lprnet/proto/eval_config.proto\"K\n\nEvalConfig\x12)\n!validation_period_during_training\x18\x01 \x01(\r\x12\x12\n\nbatch_size\x18\x02 \x01(\rb\x06proto3')
)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='validation_period_during_training', full_name='EvalConfig.validation_period_during_training', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvalConfig.batch_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=127,
)
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_tf1.cv.lprnet.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/proto/eval_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT LPRNet CTC loss."""
import tensorflow as tf
class WrapCTCLoss:
"""Wrap tf ctc loss into keras loss style."""
def __init__(self, max_label_length):
"""Initialize CTC loss's parameter."""
self.max_label_length = max_label_length
def compute_loss(self, y_true, y_pred):
"""Compute CTC loss."""
label_input = tf.reshape(y_true[:, 0:self.max_label_length],
(-1, self.max_label_length))
ctc_input_length = tf.reshape(y_true[:, -2],
(-1, 1))
label_length = tf.reshape(y_true[:, -1],
(-1, 1))
ctc_loss = tf.keras.backend.ctc_batch_cost(label_input,
y_pred,
ctc_input_length,
label_length)
return tf.reduce_mean(ctc_loss)
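# Example (sketch): y_true packs the (padded) label sequence followed by the
# CTC input length and the label length, matching the slicing above. With
# max_label_length=8, a 24-step prediction, and a 2-character plate:
#
#   loss = WrapCTCLoss(max_label_length=8)
#   y_true = [[3, 7, 0, 0, 0, 0, 0, 0, 24, 2]]  # labels..., ctc_input_length, label_length
#   # model.compile(optimizer='sgd', loss=loss.compute_loss)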
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/loss/wrap_ctc_loss.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/loss/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test lprnet loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.cv.lprnet.loss.wrap_ctc_loss import WrapCTCLoss
def test_loss():
loss = WrapCTCLoss(8)
y_true = [[0, 0, 0, 0, 0, 0, 0, 0, 24, 8]]
y_pred_one = [1.0, 0, 0]
y_pred = [[y_pred_one for _ in range(24)]]
with tf.Session() as sess:
sess.run(loss.compute_loss(tf.constant(y_true), tf.constant(y_pred)))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/loss/tests/test_wrap_ctc_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""License plate accuracy callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.lprnet.utils.ctc_decoder import decode_ctc_conf
class LPRAccuracyCallback(tf.keras.callbacks.Callback):
"""License plate accuracy callback."""
def __init__(self, eval_model, eval_interval, val_dataset, verbose=1):
"""init LPR accuracy callback."""
self.eval_model = eval_model
self.eval_interval = max(1, int(eval_interval))
self.val_dataset = val_dataset
self.verbose = verbose
def _get_accuracy(self, logs):
"""compute accuracy."""
# evaluation
self.eval_model.set_weights(self.model.get_weights())
total_cnt = self.val_dataset.n_samples
correct = 0
batch_size = self.val_dataset.batch_size
classes = self.val_dataset.classes
for idx in range(len(self.val_dataset)):
# prepare data:
batch_x, batch_y = self.val_dataset[idx]
# predict:
prediction = self.eval_model.predict(x=batch_x, batch_size=batch_size)
# decode prediction
decoded_lp, _ = decode_ctc_conf(prediction,
classes=classes,
blank_id=len(classes))
for i, lp in enumerate(decoded_lp):
if lp == batch_y[i]:
correct += 1
if logs is not None:
logs['accuracy'] = float(correct)/float(total_cnt)
print("\n")
print("*******************************************")
print("Accuracy: {} / {} {}".format(correct, total_cnt,
float(correct)/float(total_cnt)))
print("*******************************************")
print("\n")
kpi_data = {
"validation_accuracy": round(float(correct)/float(total_cnt), 5)
}
s_logger = status_logging.get_status_logger()
if isinstance(s_logger, status_logging.StatusLogger):
s_logger.kpi = kpi_data
s_logger.write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
def on_epoch_end(self, epoch, logs):
"""evaluate at the epoch end."""
self.epoch_cnt = epoch + 1
if self.epoch_cnt % self.eval_interval != 0:
logs['accuracy'] = np.nan
else:
self._get_accuracy(logs)
def on_train_end(self, logs=None):
"""compute the accuracy at the end of training."""
if self.epoch_cnt % self.eval_interval != 0:
self._get_accuracy(logs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/callbacks/ac_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status Logger callback."""
from collections.abc import Iterable
from datetime import timedelta
import os
import time
import numpy as np
import six
import tensorflow as tf
from nvidia_tao_tf1.cv.common.logging.logging import (
get_status_logger,
Status,
StatusLogger,
Verbosity
)
# Get default status logger() if it's been previously defined.
logger = get_status_logger()
KEY_MAP = {
"val_loss": "validation_loss",
"val_acc": "validation_accuracy",
"loss": "loss",
"acc": "training_accuracy",
"lr": "learning_rate"
}
class TAOStatusLogger(tf.keras.callbacks.Callback):
"""Callback that streams the data training data to a status.json file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
# Example
```python
logger = TAOStatusLogger('/path/to/results_dir')
model.fit(X_train, Y_train, callbacks=[logger])
```
# Arguments
results_dir (str): The directory where the logs will be saved.
num_epochs (int): Number of epochs to run the training
verbosity (status_logger.verbosity.Verbosity()): Verbosity level.
is_master (bool): Boolean value to check if the gpu rank is 0.
append (bool): True: append if file exists (useful for continuing
training). False: overwrite existing file.
"""
def __init__(self, results_dir, num_epochs=120,
verbosity=Verbosity.INFO,
append=False,
is_master=False):
"""Instantiate the TAOStatusLogger."""
# Make sure that the status logger obtained is always
# an instance of nvidia_tao_tf1.cv.common.logging.logging.StatusLogger.
# Otherwise, this data gets rendered in stdout.
if isinstance(logger, StatusLogger):
self.logger = logger
else:
self.logger = StatusLogger(
filename=os.path.join(results_dir, "status.json"),
is_master=is_master,
verbosity=verbosity,
append=append
)
self.keys = None
self.max_epochs = num_epochs
self._epoch_start_time = None
super(TAOStatusLogger, self).__init__()
def on_train_begin(self, logs=None):
"""Write data beginning of the training."""
self.logger.write(
status_level=Status.STARTED,
message="Starting Training Loop."
)
@staticmethod
def _handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
if isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
return k
def on_epoch_begin(self, epoch, logs=None):
"""Routines to be run at the beginning of the epoch."""
self._epoch_start_time = time.time()
def on_epoch_end(self, epoch, logs=None):
"""Collect data at the end of an epoch."""
logs = logs or {}
data = {}
data["epoch"] = epoch + 1
data["max_epoch"] = self.max_epochs
epoch_end_time = time.time()
time_per_epoch = epoch_end_time - self._epoch_start_time
eta = (self.max_epochs - (epoch + 1)) * time_per_epoch
data["time_per_epoch"] = str(timedelta(seconds=time_per_epoch))
data["eta"] = str(timedelta(seconds=eta))
graphical_data = {}
kpi_data = {}
for k, v in logs.items():
if "loss" in k:
key = KEY_MAP[k] if k in KEY_MAP else k
graphical_data[key] = str(self._handle_value(v))
if "acc" in k:
key = KEY_MAP[k] if k in KEY_MAP else k
graphical_data[key] = str(self._handle_value(v))
kpi_data[key] = str(self._handle_value(v))
if k == "lr":
graphical_data[KEY_MAP[k]] = str(self._handle_value(v))
self.logger.graphical = graphical_data
self.logger.kpi = kpi_data
self.logger.write(data=data, message="Training loop in progress")
def on_train_end(self, logs=None):
"""Callback function run at the end of training."""
self.logger.write(
status_level=Status.RUNNING,
message="Training loop complete."
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/callbacks/loggers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Soft start annealing learning rate schedule."""
from math import exp, log
from tensorflow import keras
class SoftStartAnnealingLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler implementation.
Learning rate scheduler modulates learning rate according to the progress in the
training experiment. Specifically the training progress is defined as the ratio of
the current iteration to the maximum iterations. Learning rate scheduler adjusts
learning rate in the following 3 phases:
Phase 1: 0.0 <= progress < soft_start:
Starting from min_lr exponentially increase the learning rate to base_lr
Phase 2: soft_start <= progress < annealing_start:
Maintain the learning rate at base_lr
Phase 3: annealing_start <= progress <= 1.0:
Starting from base_lr exponentially decay the learning rate to min_lr
Example:
```python
lrscheduler = SoftStartAnnealingLearningRateScheduler(
max_iterations=max_iterations)
model.fit(X_train, Y_train, callbacks=[lrscheduler])
```
Args:
base_lr: Maximum learning rate
min_lr_ratio: The ratio between minimum learning rate (min_lr) and base_lr
soft_start: The progress at which learning rate achieves base_lr when starting from min_lr
annealing_start: The progress at which learning rate starts to drop from base_lr to min_lr
max_iterations: Total number of iterations in the experiment
"""
def __init__(self, max_iterations, base_lr=5e-4, min_lr_ratio=0.01, soft_start=0.1,
annealing_start=0.7):
"""__init__ method."""
super(SoftStartAnnealingLearningRateScheduler, self).__init__()
        if not 0.0 <= soft_start <= 1.0:
            raise ValueError('The soft_start variable should be >= 0.0 and <= 1.0.')
        if not 0.0 <= annealing_start <= 1.0:
            raise ValueError('The annealing_start variable should be >= 0.0 and <= 1.0.')
        if not soft_start < annealing_start:
            raise ValueError('Variable soft_start should be less than annealing_start.')
self.base_lr = base_lr
self.min_lr_ratio = min_lr_ratio
        self.soft_start = soft_start  # Ramp lr up from min_lr until this point.
self.annealing_start = annealing_start # Start annealing to min_lr at this point.
self.max_iterations = max_iterations
self.min_lr = min_lr_ratio * base_lr
self.global_step = 0
def reset(self, initial_step):
"""Reset global_step."""
self.global_step = initial_step
def update_global_step(self):
"""Increment global_step by 1."""
self.global_step += 1
def on_train_begin(self, logs=None):
"""on_train_begin method."""
self.reset(self.global_step)
lr = self.get_learning_rate(self.global_step / float(self.max_iterations))
keras.backend.set_value(self.model.optimizer.lr, lr)
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
self.update_global_step()
progress = self.global_step / float(self.max_iterations)
lr = self.get_learning_rate(progress)
keras.backend.set_value(self.model.optimizer.lr, lr)
def on_epoch_end(self, epoch, logs):
"""on_epoch_end method."""
logs = logs or {}
logs['lr'] = keras.backend.get_value(self.model.optimizer.lr)
def get_learning_rate(self, progress):
"""Compute learning rate according to progress to reach max_iterations."""
if not 0. <= progress <= 1.:
raise ValueError('SoftStartAnnealingLearningRateScheduler '
'does not support a progress value < 0.0 or > 1.0 '
'received (%f)' % progress)
if not self.base_lr:
return self.base_lr
if self.soft_start > 0.0:
soft_start = progress / self.soft_start
else: # learning rate starts from base_lr
soft_start = 1.0
if self.annealing_start < 1.0:
annealing = (1.0 - progress) / (1.0 - self.annealing_start)
else: # learning rate is never annealed
annealing = 1.0
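        # t is the interpolation factor: 0 maps to min_lr and 1 to base_lr in log space.
        # Illustrative example: with min_lr=5e-6 and base_lr=5e-4, t=0.5 yields
        # exp(log(5e-6) + 0.5 * (log(5e-4) - log(5e-6))) = 5e-5.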
t = soft_start if progress < self.soft_start else 1.0
t = annealing if progress > self.annealing_start else t
lr = exp(log(self.min_lr) + t * (log(self.base_lr) - log(self.min_lr)))
return lr
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/callbacks/soft_start_annealing.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/callbacks/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encrypted model saver callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.cv.lprnet.utils.model_io import save_model
class KerasModelSaver(tf.keras.callbacks.Callback):
"""Save the encrypted model after every epoch.
Attributes:
        filepath: formatted string for saving models, e.g. "model_{epoch:03d}.hdf5" (illustrative).
ENC_KEY: API key to encrypt the model.
"""
def __init__(self, filepath, key, save_period, last_epoch=None, verbose=1):
"""Initialization with encryption key."""
self.filepath = filepath
self._ENC_KEY = key
self.verbose = verbose
self.save_period = int(save_period)
self.last_epoch = last_epoch
assert self.save_period > 0, "save_period must be a positive integer!"
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch."""
if (epoch + 1) % self.save_period == 0 or self.last_epoch == (epoch + 1):
fname = self.filepath.format(epoch=epoch + 1)
fname = save_model(self.model, fname, str.encode(self._ENC_KEY), '.hdf5')
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, fname))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/callbacks/enc_model_saver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""License plate tensorboard visualization callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
from PIL import Image
import tensorflow as tf
def make_image(tensor):
"""Convert an numpy representation image to Image protobuf."""
height, width, channel = tensor.shape
image = Image.fromarray(tensor)
output = io.BytesIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
return tf.Summary.Image(height=height,
width=width,
colorspace=channel,
encoded_image_string=image_string)
class LPRNetTensorBoardImage(tf.keras.callbacks.Callback):
"""Add the augmented images to Tensorboard."""
def __init__(self, log_dir, num_imgs=3):
"""Init."""
super(LPRNetTensorBoardImage, self).__init__()
self.img = tf.Variable(0., collections=[tf.GraphKeys.LOCAL_VARIABLES], validate_shape=False)
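        # Note: `self.img` only reserves a graph variable here; it is expected to be
        # assigned the augmented image batch elsewhere in the training graph, and this
        # callback merely reads its current value at the end of each batch.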
self.writer = tf.summary.FileWriter(log_dir)
self.num_imgs = num_imgs
def on_batch_end(self, batch, logs=None):
"""on_batch_end method."""
augmented_imgs = tf.keras.backend.get_value(self.img)
# scale to uint8 255:
augmented_imgs = (augmented_imgs * 255.0).astype("uint8")
# NCHW -> NHWC:
augmented_imgs = augmented_imgs.transpose(0, 2, 3, 1)
# BGR -> RGB:
augmented_imgs = augmented_imgs[:, :, :, ::-1]
cnt = 0
summary_values = []
for img in augmented_imgs:
if cnt >= self.num_imgs:
break
tb_img = make_image(img)
summary_values.append(tf.Summary.Value(tag=f"batch_imgs/{cnt}", image=tb_img))
cnt += 1
summary = tf.Summary(value=summary_values)
self.writer.add_summary(summary, batch)
self.writer.flush()
def on_train_end(self, *args, **kwargs):
"""on_train_end method."""
self.writer.close()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/callbacks/tb_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to decode ctc trained model's output."""
def decode_ctc_conf(pred,
classes,
blank_id):
    '''
    Decode a CTC-trained model's output.
    Return the decoded license plates and their confidences.
    '''
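    # CTC decoding: first collapse consecutive repeats, then drop blanks.
    # Illustrative example: with classes=["a", "b", "c"] and blank_id=3,
    # the id sequence [0, 0, 3, 1] collapses to [0, 3, 1] and decodes to "ab".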
pred_id = pred[0]
pred_conf = pred[1]
decoded_lp = []
decoded_conf = []
for idx_in_batch, seq in enumerate(pred_id):
seq_conf = pred_conf[idx_in_batch]
prev = seq[0]
tmp_seq = [prev]
tmp_conf = [seq_conf[0]]
for idx in range(1, len(seq)):
if seq[idx] != prev:
tmp_seq.append(seq[idx])
tmp_conf.append(seq_conf[idx])
prev = seq[idx]
lp = ""
output_conf = []
for index, i in enumerate(tmp_seq):
if i != blank_id:
lp += classes[i]
output_conf.append(tmp_conf[index])
decoded_lp.append(lp)
decoded_conf.append(output_conf)
return decoded_lp, decoded_conf
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/utils/ctc_decoder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/utils/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to load model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow import keras
from nvidia_tao_tf1.cv.common.utils import load_tf_keras_model
from nvidia_tao_tf1.cv.lprnet.loss.wrap_ctc_loss import WrapCTCLoss
from nvidia_tao_tf1.cv.lprnet.models import eval_builder, model_builder
from nvidia_tao_tf1.encoding import encoding
CUSTOM_OBJS = {
}
def load_model(model_path, max_label_length, key=None):
"""Load a model either in .h5 format, .tlt format or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if ext == '.hdf5':
ctc_loss = WrapCTCLoss(max_label_length)
CUSTOM_OBJS['compute_loss'] = ctc_loss.compute_loss
        # Directly load the model; the CTC loss is registered as a custom object
        # so that a compiled checkpoint can be deserialized.
model = load_tf_keras_model(model_path, custom_objects=CUSTOM_OBJS)
elif ext == '.tlt':
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
with open(temp_file_name, 'wb') as temp_file, open(model_path, 'rb') as encoded_file:
encoding.decode(encoded_file, temp_file, key)
encoded_file.close()
temp_file.close()
# recursive call
model = load_model(temp_file_name, max_label_length, None)
os.remove(temp_file_name)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model
def load_eval_model(model_path, max_label_length, key):
"""Load model in evaluation mode."""
keras.backend.set_learning_phase(0)
model_eval = load_model(model_path, max_label_length, key)
model_eval = eval_builder.build(model_eval)
keras.backend.set_learning_phase(1)
return model_eval
def load_model_as_pretrain(model_path, experiment_spec, key=None,
kernel_regularizer=None,
bias_regularizer=None,
resume_from_training=False):
"""
Load a model as pretrained weights.
If the model is pruned, just return the model.
Always return two models, first is for training, last is a template with input placeholder.
"""
model_train, model_eval, time_step = \
model_builder.build(experiment_spec,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
model_load = load_model(model_path,
experiment_spec.lpr_config.max_label_length,
key)
if resume_from_training:
model_eval = load_eval_model(model_path,
experiment_spec.lpr_config.max_label_length,
key)
return model_load, model_eval, time_step
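    # Otherwise, transfer weights layer by layer. A shape mismatch below is treated
    # as evidence of a pruned checkpoint, which is then loaded directly (with the
    # training regularizers re-applied) instead of the freshly built graph.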
strict_mode = True
error_layers = []
loaded_layers = []
for layer in model_train.layers[1:]:
try:
l_return = model_load.get_layer(layer.name)
except ValueError:
if layer.name[-3:] != 'qdq' and strict_mode:
error_layers.append(layer.name)
# Some layers are not there
continue
try:
wts = l_return.get_weights()
if len(wts) > 0:
layer.set_weights(wts)
loaded_layers.append(layer.name)
except ValueError:
if layer.name == "td_dense":
continue
if strict_mode:
# This is a pruned model
print('The shape of this layer does not match original model:', layer.name)
print('Loading the model as a pruned model.')
model_config = model_load.get_config()
for layer, layer_config in zip(model_load.layers, model_config['layers']):
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = kernel_regularizer
reg_model = keras.models.Model.from_config(model_config, custom_objects=CUSTOM_OBJS)
reg_model.set_weights(model_load.get_weights())
os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
os.close(os_handle)
reg_model.save(temp_file_name, overwrite=True, include_optimizer=False)
model_eval = load_eval_model(model_path,
experiment_spec.lpr_config.max_label_length,
key)
train_model = load_model(temp_file_name,
experiment_spec.lpr_config.max_label_length,
)
os.remove(temp_file_name)
return train_model, model_eval, time_step
error_layers.append(layer.name)
if len(error_layers) > 0:
print('Weights for those layers can not be loaded:', error_layers)
        print('STOP training now and check the pretrained model if this is not expected!')
print("Layers that load weights from the pretrained model:", loaded_layers)
return model_train, model_eval, time_step
def save_model(keras_model, model_path, key, save_format=None):
"""Save a model to either .h5, .tlt or .hdf5 format."""
_, ext = os.path.splitext(model_path)
if (save_format is not None) and (save_format != ext):
# recursive call to save a correct model
return save_model(keras_model, model_path + save_format, key, None)
if ext == '.hdf5':
keras_model.save(model_path, overwrite=True, include_optimizer=True)
else:
raise NotImplementedError("{0} file is not supported!".format(ext))
return model_path
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/utils/model_io.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load an experiment spec file to run SSD training, evaluation, pruning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_tf1.cv.common.spec_validator import SpecValidator, ValueChecker
import nvidia_tao_tf1.cv.lprnet.proto.experiment_pb2 as experiment_pb2
logger = logging.getLogger(__name__)
_LPRNet_VALUE_CHECKER_ = {"hidden_units": [ValueChecker(">", 0)],
"max_label_length": [ValueChecker(">", 0)],
"arch": [ValueChecker("!=", ""),
ValueChecker("in", ["baseline"])],
"nlayers": [ValueChecker(">", 0)],
"checkpoint_interval": [ValueChecker(">=", 0)],
"batch_size_per_gpu": [ValueChecker(">", 0)],
"num_epochs": [ValueChecker(">", 0)],
"min_learning_rate": [ValueChecker(">", 0)],
"max_learning_rate": [ValueChecker(">", 0)],
"soft_start": [ValueChecker(">", 0), ValueChecker("<", 1.0)],
"annealing": [ValueChecker(">", 0), ValueChecker("<", 1.0)],
"validation_period_during_training": [ValueChecker(">", 0)],
"batch_size": [ValueChecker(">", 0)],
"output_width": [ValueChecker(">", 32)],
"output_height": [ValueChecker(">", 32)],
"output_channel": [ValueChecker("in", [1, 3])],
"max_rotate_degree": [ValueChecker(">=", 0),
ValueChecker("<", 90)],
"rotate_prob": [ValueChecker(">=", 0),
ValueChecker("<=", 1.0)],
"gaussian_kernel_size": [ValueChecker(">", 0)],
"blur_prob": [ValueChecker(">=", 0),
ValueChecker("<=", 1.0)],
"reverse_color_prob": [ValueChecker(">=", 0),
ValueChecker("<=", 1.0)],
"keep_original_prob": [ValueChecker(">=", 0),
ValueChecker("<=", 1.0)],
"label_directory_path": [ValueChecker("!=", "")],
"image_directory_path": [ValueChecker("!=", "")],
"characters_list_file": [ValueChecker("!=", "")],
"monitor": [ValueChecker("in", ["loss"])],
"min_delta": [ValueChecker(">=", 0)],
"patience": [ValueChecker(">=", 0)]}
TRAIN_EXP_REQUIRED_MSG = ["lpr_config", "training_config", "eval_config",
"augmentation_config", "dataset_config"]
EVAL_EXP_REQUIRED_MSG = ["lpr_config", "eval_config",
"augmentation_config", "dataset_config"]
INFERENCE_EXP_REQUIRED_MSG = ["lpr_config", "eval_config",
"augmentation_config", "dataset_config"]
EXPORT_EXP_REQUIRED_MSG = ["lpr_config"]
_REQUIRED_MSG_ = {"training_config": ["learning_rate", "regularizer"],
"learning_rate": ["soft_start_annealing_schedule"],
"soft_start_annealing_schedule": ["min_learning_rate",
"max_learning_rate",
"soft_start",
"annealing"],
"dataset_config": ["data_sources"]}
lprnet_spec_validator = SpecValidator(required_msg_dict=_REQUIRED_MSG_,
value_checker_dict=_LPRNet_VALUE_CHECKER_)
def spec_validator(spec, required_msg=None):
"""do spec validation for LPRNet."""
if required_msg is None:
required_msg = []
lprnet_spec_validator.validate(spec, required_msg)
def load_proto(spec_path, proto_buffer, default_spec_path=None, merge_from_default=True):
"""Load spec from file and merge with given proto_buffer instance.
Args:
spec_path (str): location of a file containing the custom spec proto.
        proto_buffer(pb2): protocol buffer instance to be loaded.
        default_spec_path(str): location of default spec to use if merge_from_default is True.
        merge_from_default (bool): whether to merge the custom spec on top of the default
            spec; if False, spec_path must be set.
Returns:
proto_buffer(pb2): protocol buffer instance updated with spec.
"""
def _load_from_file(filename, pb2):
with open(filename, "r") as f:
merge_text_proto(f.read(), pb2)
    # Setting merge_from_default to False prevents concatenating repeated fields
    # from the default spec.
if merge_from_default:
assert default_spec_path, \
"default spec path has to be defined if merge_from_default is enabled"
# Load the default spec
_load_from_file(default_spec_path, proto_buffer)
else:
assert spec_path, "spec_path has to be defined, if merge_from_default is disabled"
# Merge a custom proto on top of the default spec, if given
if spec_path:
logger.info("Merging specification from %s", spec_path)
_load_from_file(spec_path, proto_buffer)
return proto_buffer
def load_experiment_spec(spec_path=None, merge_from_default=False):
"""Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.
Args:
spec_path (str): location of a file containing the custom experiment spec proto.
        merge_from_default (bool): whether to merge with the default spec; if False,
            spec_path must be set.
Returns:
experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.
"""
experiment_spec = experiment_pb2.Experiment()
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path, 'experiment_specs/default_spec.txt')
experiment_spec = load_proto(spec_path, experiment_spec, default_spec_path,
merge_from_default)
return experiment_spec
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/utils/spec_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to image processing."""
import random
import cv2
import numpy as np
def reverse_color(img):
'''reverse color.'''
img = 255 - img
return img
class GaussianBlur:
'''Gaussian blur.'''
def __init__(self, kernel_list):
"""init blur."""
self.kernel_list = kernel_list
def __call__(self, img):
"""Gaussian blur the image."""
kernel_size = self.kernel_list[random.randint(0, len(self.kernel_list)-1)]
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
class CenterRotate:
'''rotate within degree at center of license plate.'''
def __init__(self, degree):
"""init rotation degree."""
self.degree = degree
def __call__(self, img):
"""rotate the image."""
degree = random.randint(-self.degree,
self.degree)
rows, cols = img.shape[0:2]
m = cv2.getRotationMatrix2D((cols/2, rows/2), degree, 1)
img = cv2.warpAffine(img, m, (cols, rows))
return img
def preprocess(batch_img,
output_width,
output_height,
is_training=True,
output_channel=3,
keep_original_prob=0.3,
max_rotate_degree=5,
rotate_prob=0.5,
gaussian_kernel_size=None,
blur_prob=0.5,
reverse_color_prob=0.5
):
"""preprocess pipeline of the images."""
# if it is training, then augmented
# resize and scale: cv2 resize((width, height))
# Augmentation: blur, reverse color, rotate
augmented_batch_img = []
if is_training:
transform_op_list = []
transform_prob_list = []
if max_rotate_degree > 0:
rotate = CenterRotate(max_rotate_degree)
transform_op_list.append(rotate)
transform_prob_list.append(rotate_prob)
        if gaussian_kernel_size and (blur_prob > 0):  # also guards the None default
blur = GaussianBlur(gaussian_kernel_size)
transform_op_list.append(blur)
transform_prob_list.append(blur_prob)
if reverse_color_prob > 0:
transform_op_list.append(reverse_color)
transform_prob_list.append(reverse_color_prob)
for img in batch_img:
if random.random() > keep_original_prob:
for transform, transform_prob in zip(transform_op_list, transform_prob_list):
if random.random() > transform_prob:
continue
img = transform(img)
augmented_batch_img.append(img)
else:
augmented_batch_img = batch_img
batch_resized_img = np.array([(cv2.resize(img, (output_width, output_height)))/255.0
for img in augmented_batch_img],
dtype=np.float32)
if output_channel == 1:
batch_resized_img = batch_resized_img[..., np.newaxis]
# transpose image batch from NHWC (0, 1, 2, 3) to NCHW (0, 3, 1, 2)
batch_resized_img = batch_resized_img.transpose(0, 3, 1, 2)
return batch_resized_img
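# Usage sketch (illustrative):
#   batch = preprocess(list_of_bgr_images, output_width=96, output_height=48,
#                      is_training=False)
#   # -> float32 array of shape (N, 3, 48, 96) with values in [0, 1]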
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/utils/img_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test lprnet model builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.lprnet.utils.ctc_decoder import decode_ctc_conf
def test_ctc_decoder():
classes = ["a", "b", "c"]
blank_id = 3
pred_id = [[0, 0, 0, 3, 1, 2]]
pred_conf = [[0.99, 0.99, 0.99, 0.99, 0.99, 0.99]]
expected_lp = "abc"
decoded_lp, _ = decode_ctc_conf((pred_id, pred_conf), classes, blank_id)
assert expected_lp == decoded_lp[0]
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/utils/tests/test_ctc_decoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test spec loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import pytest
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import load_experiment_spec, spec_validator
def test_spec_loader():
experiment_spec = load_experiment_spec(merge_from_default=True)
assert experiment_spec.eval_config.validation_period_during_training > 0
assert experiment_spec.training_config.num_epochs > 0
assert experiment_spec.lpr_config.hidden_units == 512
assert experiment_spec.lpr_config.max_label_length == 8
assert experiment_spec.augmentation_config.max_rotate_degree == 5
def catch_assert_error(spec):
with pytest.raises(AssertionError):
spec_validator(spec)
def test_spec_validator():
experiment_spec = load_experiment_spec(merge_from_default=True)
# lpr_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.lpr_config.hidden_units = 0
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.lpr_config.arch = "baselin"
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.lpr_config.arch = ""
catch_assert_error(test_spec)
# train_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.training_config.num_epochs = 0
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.training_config.learning_rate.soft_start_annealing_schedule.soft_start = 0
catch_assert_error(test_spec)
test_spec.training_config.early_stopping.monitor = "losss"
catch_assert_error(test_spec)
# eval_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.eval_config.batch_size = 0
catch_assert_error(test_spec)
# aug_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.augmentation_config.output_channel = 4
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.augmentation_config.gaussian_kernel_size[:] = [0]
catch_assert_error(test_spec)
test_spec = copy.deepcopy(experiment_spec)
test_spec.augmentation_config.blur_prob = 1.1
catch_assert_error(test_spec)
# dataset_config_test:
test_spec = copy.deepcopy(experiment_spec)
test_spec.dataset_config.data_sources[0].label_directory_path = ""
catch_assert_error(test_spec)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/utils/tests/test_spec_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT LPRNet baseline model."""
import tensorflow as tf
# @TODO(tylerz): Shall we use fixed padding as caffe?
# def Conv2DFixedPadding(input, filters, kernel_size, strides):
# return net
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-4
def BNReLULayer(inputs, name, trainable, relu=True,
init_zero=False, data_format='channels_last'):
    '''Fused batch-norm + optional ReLU block.'''
if init_zero:
gamma_initializer = tf.keras.initializers.Zeros()
else:
gamma_initializer = tf.keras.initializers.Ones()
if data_format == 'channels_first':
axis = 1
else:
axis = -1
net = tf.keras.layers.BatchNormalization(
axis=axis,
momentum=_BATCH_NORM_DECAY,
epsilon=_BATCH_NORM_EPSILON,
center=True,
scale=True,
trainable=trainable,
gamma_initializer=gamma_initializer,
fused=True,
name=name
)(inputs)
if relu:
net = tf.keras.layers.ReLU()(net)
return net
def ResidualBlock(inputs, block_idx, filters, kernel_size,
strides, use_projection=False, finetune_bn=True,
trainable=True, data_format="channels_last",
kernel_regularizer=None, bias_regularizer=None):
'''Fusion layer of Residual block.'''
# # @TODO(tylerz): shall we init conv kernels with glorot_normal as caffe ????
# kernel_initializer = "glorot_normal"
# # @TODO(tylerz): shall we init conv bias with 0.2 as caffe???
# bias_initializer = tf.constant_initializer(0.2)
# branch1
if use_projection:
shortcut = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=1,
strides=strides,
padding="valid",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name="res%s_branch1" % block_idx,
data_format=data_format,
trainable=trainable
)(inputs)
shortcut = BNReLULayer(inputs=shortcut, name="bn%s_branch1" % block_idx,
trainable=trainable and finetune_bn,
relu=False, init_zero=False,
data_format=data_format)
else:
shortcut = inputs
# branch2a
x = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name="res%s_branch2a" % block_idx,
data_format=data_format,
trainable=trainable
)(inputs)
x = BNReLULayer(inputs=x, name="bn%s_branch2a" % block_idx,
trainable=trainable and finetune_bn,
relu=True, init_zero=False,
data_format=data_format)
# branch2b
x = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=(1, 1),
padding="same",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name="res%s_branch2b" % block_idx,
data_format=data_format,
trainable=trainable
)(x)
# @TODO(tylerz): Shall we init gamma zero here ??????
x = BNReLULayer(inputs=x, name="bn%s_branch2b" % block_idx,
trainable=trainable and finetune_bn,
relu=False, init_zero=False,
data_format=data_format)
net = tf.keras.layers.ReLU()(shortcut+x)
return net
class LPRNetbaseline:
'''Tuned ResNet18 and ResNet10 as the baseline for LPRNet.'''
def __init__(self, nlayers=18, freeze_bn=False,
kernel_regularizer=None, bias_regularizer=None):
'''Initialize the parameter for LPRNet baseline model.'''
self.finetune_bn = (not freeze_bn)
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
        assert nlayers in (10, 18), "LPRNet baseline model only supports 18 and 10 layers"
self.nlayers = nlayers
def __call__(self, input_tensor, trainable):
'''Generate LPRNet baseline model.'''
finetune_bn = self.finetune_bn
kernel_regularizer = self.kernel_regularizer
bias_regularizer = self.bias_regularizer
data_format = "channels_first"
# block1:
x = tf.keras.layers.Conv2D(filters=64,
kernel_size=3,
strides=(1, 1),
padding="same",
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name="conv1",
data_format=data_format,
trainable=trainable
)(input_tensor)
x = BNReLULayer(inputs=x, name="bn_conv1",
trainable=trainable and finetune_bn,
relu=True, init_zero=False,
data_format=data_format)
x = tf.keras.layers.MaxPool2D(pool_size=3, strides=(1, 1),
padding="same", data_format=data_format)(x)
# block 2a
x = ResidualBlock(inputs=x, block_idx="2a", filters=64,
kernel_size=3, strides=(1, 1), use_projection=True,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
if self.nlayers == 18:
# block 2b
x = ResidualBlock(inputs=x, block_idx="2b", filters=64,
kernel_size=3, strides=(1, 1), use_projection=False,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
# block 3a
x = ResidualBlock(inputs=x, block_idx="3a", filters=128,
kernel_size=3, strides=(2, 2), use_projection=True,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
if self.nlayers == 18:
# block 3b
x = ResidualBlock(inputs=x, block_idx="3b", filters=128,
kernel_size=3, strides=(1, 1), use_projection=False,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
# block 4a
x = ResidualBlock(inputs=x, block_idx="4a", filters=256,
kernel_size=3, strides=(2, 2), use_projection=True,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
if self.nlayers == 18:
# block 4b
x = ResidualBlock(inputs=x, block_idx="4b", filters=256,
kernel_size=3, strides=(1, 1), use_projection=False,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
# block 5a
x = ResidualBlock(inputs=x, block_idx="5a", filters=300,
kernel_size=3, strides=(1, 1), use_projection=True,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
if self.nlayers == 18:
# block 5b
x = ResidualBlock(inputs=x, block_idx="5b", filters=300,
kernel_size=3, strides=(1, 1), use_projection=False,
finetune_bn=finetune_bn, trainable=trainable,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)
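        # With a 48x96 input, only blocks 3a and 4a downsample (stride 2), so the
        # returned feature map is 300 x 12 x 24 (CHW); its width of 24 becomes the
        # LSTM time step in the model builder.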
return x
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/models/lprnet_base_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT LPRNet model builder."""
import keras
import six
import tensorflow as tf
from nvidia_tao_tf1.cv.common.models.backbones import get_backbone
from nvidia_tao_tf1.cv.lprnet.models.lprnet_base_model import LPRNetbaseline
def eval_str(s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
class get_iva_template_model:
'''Get keras model from iva template and convert to tf.keras model.'''
def __init__(self, arch, nlayers,
kernel_regularizer=None,
bias_regularizer=None,
freeze_blocks=None,
freeze_bn=None):
'''Initialize iva template model parameters.'''
self.arch = arch
self.nlayers = nlayers
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.freeze_blocks = freeze_blocks
self.freeze_bn = freeze_bn
def __call__(self, input_tensor, trainable=True):
'''Generate iva template model.'''
input_shape = input_tensor.get_shape().as_list()
if trainable:
tf.keras.backend.set_learning_phase(1)
else:
tf.keras.backend.set_learning_phase(0)
dummy_input = keras.Input(shape=(input_shape[1], input_shape[2], input_shape[3]),
name="dummy_input")
dummy_model = get_backbone(dummy_input,
self.arch,
data_format='channels_first',
kernel_regularizer=self.kernel_regularizer,
bias_regularizer=self.bias_regularizer,
freeze_blocks=self.freeze_blocks,
freeze_bn=self.freeze_bn,
nlayers=self.nlayers,
use_batch_norm=True,
use_pooling=False,
use_bias=False,
all_projections=True,
dropout=1e-3,
force_relu=True)
model_config = dummy_model.to_json()
real_backbone = tf.keras.models.model_from_json(model_config)
output_tensor = real_backbone(input_tensor)
return output_tensor
def LPRNetmodel(input_shape,
backbone_func,
trainable,
n_class,
model_name,
hidden_units=512,
kernel_regularizer=None,
bias_regularizer=None):
'''build the LPRNet model and compute time_step.'''
image_input = tf.keras.Input(shape=input_shape, name="image_input")
model_input = tf.keras.backend.sum(image_input, axis=1, keepdims=True)
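    # Collapse the channel dimension by summation so the backbone always receives
    # a single-channel input, regardless of the configured output_channel.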
backbone_feature = backbone_func(model_input, trainable)
# chw -> whc
permuted_feature = tf.keras.layers.Permute((3, 2, 1),
name="permute_feature")(backbone_feature)
permuted_feature_shape = permuted_feature.get_shape().as_list()
time_step = permuted_feature_shape[1]
flatten_size = permuted_feature_shape[2]*permuted_feature_shape[3]
flatten_feature = tf.keras.layers.Reshape((time_step, flatten_size),
name="flatten_feature")(permuted_feature)
# LSTM input: [batch_size, 24, 12*300] LSTM output: [batch_size, 24, 512]
lstm_sequence = tf.keras.layers.LSTM(units=hidden_units, return_sequences=True,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer)(flatten_feature)
    logits = tf.keras.layers.TimeDistributed(
        tf.keras.layers.Dense(units=n_class + 1,
                              kernel_regularizer=kernel_regularizer,
                              bias_regularizer=bias_regularizer), name="td_dense")(lstm_sequence)
    prob = tf.keras.layers.Softmax(axis=-1)(logits)  # shape: [batch_size, time_step, n_class + 1]
if trainable:
model = tf.keras.Model(inputs=[image_input],
outputs=prob,
name=model_name)
return model, time_step
# For inference and evaluation
sequence_id = tf.keras.backend.argmax(prob, axis=-1) # [batch_size, 24]
    sequence_prob = tf.keras.backend.max(prob, axis=-1)  # [batch_size, 24]
model = tf.keras.Model(inputs=[image_input],
outputs=[sequence_id, sequence_prob],
name=model_name)
return model, time_step
def build(experiment_spec,
kernel_regularizer=None,
bias_regularizer=None):
    '''
    Top builder for the LPRNet model.
    Return train_model, eval_model and time_step.
    '''
input_width = experiment_spec.augmentation_config.output_width
input_height = experiment_spec.augmentation_config.output_height
input_channel = experiment_spec.augmentation_config.output_channel
model_arch = experiment_spec.lpr_config.arch
n_layers = experiment_spec.lpr_config.nlayers
freeze_bn = eval_str(experiment_spec.lpr_config.freeze_bn)
characters_list_file = experiment_spec.dataset_config.characters_list_file
with open(characters_list_file, "r") as f:
temp_list = f.readlines()
classes = [i.strip() for i in temp_list]
n_class = len(classes)
input_shape = (input_channel, input_height, input_width)
if model_arch == "baseline":
backbone_func = LPRNetbaseline(nlayers=n_layers,
freeze_bn=freeze_bn,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer
)
else:
# only support freeze blocks in iva template:
freeze_blocks = eval_str(experiment_spec.lpr_config.freeze_blocks)
backbone_func = get_iva_template_model(arch=model_arch,
nlayers=n_layers,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn
)
# build train graph
tf.keras.backend.set_learning_phase(1)
train_model, time_step = LPRNetmodel(input_shape, backbone_func=backbone_func,
model_name="lpnet_"+model_arch+"_"+str(n_layers),
n_class=n_class,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=True)
# build eval graph
tf.keras.backend.set_learning_phase(0)
eval_model, _ = LPRNetmodel(input_shape, backbone_func=backbone_func,
model_name="lpnet_"+model_arch+"_"+str(n_layers),
n_class=n_class,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
trainable=False)
# set back to training phase
tf.keras.backend.set_learning_phase(1)
return train_model, eval_model, time_step
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/models/model_builder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/models/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for evaluation.'''
import tensorflow as tf
def build(model):
'''Build model for evaluation.'''
prob = model.layers[-1].output
sequence_id = tf.keras.backend.argmax(prob, axis=-1)
sequence_prob = tf.keras.backend.max(prob, axis=-1)
eval_model = tf.keras.Model(inputs=[model.input],
outputs=[sequence_id, sequence_prob])
return eval_model
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/models/eval_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test lprnet model builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pytest
import nvidia_tao_tf1.cv.lprnet.models.model_builder as model_builder
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import load_experiment_spec
backbone_configs = [
('baseline', 10, 24),
('baseline', 18, 24),
('resnet', 34, 6),
('resnet', 50, 6),
('vgg', 16, 6),
]
@pytest.fixture
def experiment_spec():
experiment_spec = load_experiment_spec(merge_from_default=True)
label = "abcdefg"
with open("tmp_ch_list.txt", "w") as f:
for ch in label:
f.write(ch + "\n")
experiment_spec.dataset_config.characters_list_file = "tmp_ch_list.txt"
yield experiment_spec
os.remove("tmp_ch_list.txt")
@pytest.mark.parametrize("model_arch, nlayers, expected_time_step",
backbone_configs)
def test_model_builder(model_arch, nlayers, expected_time_step, experiment_spec):
experiment_spec.lpr_config.arch = model_arch
experiment_spec.lpr_config.nlayers = nlayers
_, model, time_step = model_builder.build(experiment_spec)
assert time_step == expected_time_step
model.predict(np.random.randn(1, 3, 48, 96))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/models/tests/test_model_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test lprnet eval builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.lprnet.models import eval_builder
@pytest.fixture
def test_model():
x = tf.keras.Input(shape=(24, 36))
model = tf.keras.Model(inputs=x, outputs=x)
return model
def test_decoded_output(test_model):
eval_model = eval_builder.build(test_model)
assert len(eval_model.outputs) == 2
assert eval_model.outputs[0].shape[-1] == 24
assert eval_model.outputs[1].shape[-1] == 24
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/models/tests/test_eval_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test lprnet base model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.lprnet.models.lprnet_base_model import LPRNetbaseline
correct_nlayers_list = [10, 18]
@pytest.mark.parametrize("nlayers",
correct_nlayers_list)
def test_lprnet_base_model(nlayers):
input_layer = tf.keras.Input(shape=(3, 48, 96))
output_layer = LPRNetbaseline(nlayers=nlayers)(input_layer, trainable=False)
model = tf.keras.Model(inputs=input_layer, outputs=output_layer)
model.predict(np.random.randn(1, 3, 48, 96))
def test_wrong_lprnet_base_model():
with pytest.raises(AssertionError):
LPRNetbaseline(nlayers=99)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/models/tests/test_lprnet_base_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA SSD entrypoint scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained FasterRCNN model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import logging
import os
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.lprnet.export.lprnet_exporter import LPRNetExporter as Exporter
logger = logging.getLogger(__name__)
DEFAULT_MAX_WORKSPACE_SIZE = 2 * (1 << 30)
DEFAULT_MAX_BATCH_SIZE = 1
def build_command_line_parser(parser=None):
"""Build a command line parser."""
if parser is None:
parser = argparse.ArgumentParser(description='Export a trained TLT model')
parser.add_argument("-m",
"--model",
help="Path to the model file.",
type=str,
required=True,
default=None)
parser.add_argument("-k",
"--key",
help="Key to load the model.",
default="",
type=str,
required=False)
parser.add_argument("-e",
"--experiment_spec",
type=str,
default=None,
required=True,
help="Path to the experiment spec file.")
parser.add_argument("-o",
"--output_file",
type=str,
default=None,
help="Output file (defaults to $(input_filename).etlt)")
    # TLT 3.0 only supports FP32 and FP16.
parser.add_argument("--data_type",
type=str,
default="fp32",
help="Data type for the TensorRT export.",
choices=["fp32", "fp16"])
parser.add_argument("--max_workspace_size",
type=int,
default=DEFAULT_MAX_WORKSPACE_SIZE,
help="Max size of workspace to be set for TensorRT engine builder.")
parser.add_argument("--max_batch_size",
type=int,
default=DEFAULT_MAX_BATCH_SIZE,
help="Max batch size for TensorRT engine builder.")
parser.add_argument("--engine_file",
type=str,
default=None,
help="Path to the exported TRT engine.")
parser.add_argument("-v",
"--verbose",
action="store_true",
default=False,
help="Verbosity of the logger.")
parser.add_argument("-s",
"--strict_type_constraints",
action="store_true",
default=False,
help="Apply TensorRT strict_type_constraints or not")
parser.add_argument("--results_dir",
type=str,
default=None,
help="Path to the files where the logs are stored.")
parser.add_argument("--target_opset",
type=int,
default=None,
help="Target opset for ONNX models.")
# Int8 calibration arguments.
# parser.add_argument("--force_ptq",
# action="store_true",
# default=False,
# help="Flag to force post training quantization for QAT models.")
# parser.add_argument("--batch_size",
# type=int,
# default=16,
# help="Number of images per batch for calibration.")
# parser.add_argument("--cal_data_file",
# default="",
# type=str,
# help="Tensorfile to run calibration for int8 optimization.")
# parser.add_argument("--cal_image_dir",
# default="",
# type=str,
# help="Directory of images to run int8 calibration if "
# "data file is unavailable")
# parser.add_argument('--cal_cache_file',
# default='./cal.bin',
# type=str,
# help='Calibration cache file to write to.')
# parser.add_argument("--batches",
# type=int,
# default=10,
# help="Number of batches to calibrate over.")
return parser
def parse_command_line(args=None):
"""Simple function to parse arguments."""
parser = build_command_line_parser()
args = vars(parser.parse_args(args))
return args
def build_exporter(model_path, key,
experiment_spec="",
data_type="fp32",
strict_type=False,
target_opset=12):
"""Simple function to build exporter instance."""
constructor_kwargs = {'model_path': model_path,
'key': key,
"experiment_spec_path": experiment_spec,
'data_type': data_type,
'strict_type': strict_type,
'target_opset': target_opset}
return Exporter(**constructor_kwargs)
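# Example invocation (illustrative):
#   python export.py -m model.tlt -k $KEY -e spec.txt --data_type fp16 \
#       --engine_file model.engine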
def main(cl_args=None):
"""CLI wrapper to run export.
This function parses the command line interface for tlt-export, instantiates the respective
    exporter and serializes the trained model to an etlt file. The tool can also
    build a TensorRT engine from the exported model.
Args:
cl_args(list): Arguments to parse.
Returns:
No explicit returns.
"""
args = parse_command_line(args=cl_args)
run_export(args)
def run_export(args):
"""Wrapper to run export of tlt models.
Args:
args (dict): Dictionary of parsed arguments to run export.
Returns:
No explicit returns.
"""
# Parsing command line arguments.
model_path = args['model']
key = args['key']
data_type = args['data_type']
output_file = args['output_file']
experiment_spec = args['experiment_spec']
engine_file_name = args['engine_file']
max_workspace_size = args["max_workspace_size"]
max_batch_size = args["max_batch_size"]
strict_type = args['strict_type_constraints']
target_opset = args["target_opset"]
backend = "onnx"
# Calibrator configuration.
# cal_cache_file = args['cal_cache_file']
# cal_image_dir = args['cal_image_dir']
# cal_data_file = args['cal_data_file']
# batch_size = args['batch_size']
# n_batches = args['batches']
# force_ptq = args["force_ptq"]
# Status logger for the UI. By default this will be populated in /workspace/logs.
results_dir = args.get("results_dir", None)
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
timestamp = int(dt.timestamp(dt.now()))
filename = "status.json"
if results_dir == "/workspace/logs":
filename = f"status_export_{timestamp}.json"
status_file = os.path.join(results_dir, filename)
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True
)
)
status_logger = status_logging.get_status_logger()
save_engine = False
if engine_file_name is not None:
save_engine = True
log_level = "INFO"
if args['verbose']:
log_level = "DEBUG"
# Configure the logger.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=log_level)
# Set default output filename if the filename
# isn't provided over the command line.
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = f"{split_name}.{backend}"
if not (backend in output_file):
output_file = f"{output_file}.{backend}"
logger.info("Saving exported model to {}".format(output_file))
# Warn the user if an exported file already exists.
assert not os.path.exists(output_file), "Output file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# Build exporter instance
status_logger.write(message="Building exporter object.")
exporter = build_exporter(model_path, key,
experiment_spec=experiment_spec,
data_type=data_type,
strict_type=strict_type,
target_opset=target_opset)
# Export the model to etlt file and build the TRT engine.
status_logger.write(message="Exporting the model.")
exporter.export(output_file_name=output_file,
backend=backend,
save_engine=save_engine,
engine_file_name=engine_file_name,
max_batch_size=max_batch_size,
max_workspace_size=max_workspace_size,
gen_ds_config=False)
if __name__ == "__main__":
try:
main()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform continuous LPRNet training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import lru_cache
import logging
from math import ceil
import os
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.lprnet.callbacks.ac_callback import LPRAccuracyCallback as ac_callback
from nvidia_tao_tf1.cv.lprnet.callbacks.enc_model_saver import KerasModelSaver
from nvidia_tao_tf1.cv.lprnet.callbacks.loggers import TAOStatusLogger
from nvidia_tao_tf1.cv.lprnet.callbacks.soft_start_annealing import \
SoftStartAnnealingLearningRateScheduler as LRS
from nvidia_tao_tf1.cv.lprnet.callbacks.tb_callback import LPRNetTensorBoardImage
from nvidia_tao_tf1.cv.lprnet.dataloader.data_sequence import LPRNetDataGenerator
from nvidia_tao_tf1.cv.lprnet.loss.wrap_ctc_loss import WrapCTCLoss
from nvidia_tao_tf1.cv.lprnet.models import model_builder
from nvidia_tao_tf1.cv.lprnet.utils.model_io import load_model_as_pretrain
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import (
load_experiment_spec,
spec_validator,
TRAIN_EXP_REQUIRED_MSG
)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
verbose = 0
@lru_cache()
def hvd_tf_keras():
"""lazy import horovod."""
import horovod.tensorflow.keras as hvd
return hvd
def run_experiment(config_path, results_dir, resume_weights, key, init_epoch=1):
"""
Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster submission works.
Args:
config_path (str): Path to a text file containing a complete experiment configuration.
results_dir (str): Path to a folder where various training outputs will be written.
If the folder does not already exist, it will be created.
resume_weights (str): Optional path to a pretrained model file.
        key (str): Key to load a pretrained .tlt model.
        init_epoch (int): The epoch number from which to resume training.
    """
hvd = hvd_tf_keras()
hvd.init()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
verbose = 1 if hvd.rank() == 0 else 0
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
# Load experiment spec.
experiment_spec = load_experiment_spec(config_path)
spec_validator(experiment_spec, TRAIN_EXP_REQUIRED_MSG)
training_config = experiment_spec.training_config
if hvd.rank() == 0:
if training_config.HasField("visualizer"):
if training_config.visualizer.HasField("clearml_config"):
clearml_config = training_config.visualizer.clearml_config
get_clearml_task(clearml_config, "lprnet")
# Load training parameters
num_epochs = experiment_spec.training_config.num_epochs
batch_size_per_gpu = experiment_spec.training_config.batch_size_per_gpu
ckpt_interval = experiment_spec.training_config.checkpoint_interval or 5
# config kernel regularizer
reg_type = experiment_spec.training_config.regularizer.type
reg_weight = experiment_spec.training_config.regularizer.weight
kr = None
br = None
    if reg_type > 0:
        assert 0 < reg_weight < 1, \
            "Weight decay should be greater than 0 and less than 1"
if reg_type == 1:
kr = tf.keras.regularizers.l1(reg_weight)
br = tf.keras.regularizers.l1(reg_weight)
else:
kr = tf.keras.regularizers.l2(reg_weight)
br = tf.keras.regularizers.l2(reg_weight)
# configure optimizer and loss
optimizer = tf.keras.optimizers.SGD(lr=0.0001,
momentum=0.9,
decay=0.0,
nesterov=False)
max_label_length = experiment_spec.lpr_config.max_label_length
ctc_loss = WrapCTCLoss(max_label_length)
# build train/eval model
if resume_weights is not None:
if init_epoch == 1:
resume_from_training = False
else:
resume_from_training = True
logger.info("Loading pretrained weights. This may take a while...")
model, model_eval, time_step = \
load_model_as_pretrain(resume_weights,
experiment_spec,
key, kr, br,
resume_from_training)
if init_epoch == 1:
print("Initialize optimizer")
model.compile(optimizer=hvd.DistributedOptimizer(optimizer),
loss=ctc_loss.compute_loss)
else:
print("Resume optimizer from pretrained model")
model.compile(optimizer=hvd.DistributedOptimizer(model.optimizer),
loss=ctc_loss.compute_loss)
else:
model, model_eval, time_step = \
model_builder.build(experiment_spec,
kernel_regularizer=kr,
bias_regularizer=br)
print("Initialize optimizer")
model.compile(optimizer=hvd.DistributedOptimizer(optimizer),
loss=ctc_loss.compute_loss)
# build train / eval dataset:
train_data = LPRNetDataGenerator(experiment_spec=experiment_spec,
is_training=True,
shuffle=True,
time_step=time_step)
val_data = LPRNetDataGenerator(experiment_spec=experiment_spec,
is_training=False,
shuffle=False)
# build learning rate scheduler
lrconfig = experiment_spec.training_config.learning_rate.soft_start_annealing_schedule
total_num = train_data.n_samples
iters_per_epoch = int(ceil(total_num / batch_size_per_gpu) // hvd.size())
max_iterations = num_epochs * iters_per_epoch
lr_scheduler = LRS(base_lr=lrconfig.max_learning_rate * hvd.size(),
min_lr_ratio=lrconfig.min_learning_rate / lrconfig.max_learning_rate,
soft_start=lrconfig.soft_start,
annealing_start=lrconfig.annealing,
max_iterations=max_iterations)
init_step = (init_epoch - 1) * iters_per_epoch
lr_scheduler.reset(init_step)
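    # Illustrative note (assumption about the LRS callback imported above, not
    # from the original file): a soft-start annealing schedule typically behaves
    # piecewise over the training progress t in [0, 1]:
    #   t < soft_start           -> LR ramps up from min_lr to base_lr
    #   soft_start <= t < anneal -> LR holds at base_lr
    #   t >= anneal              -> LR decays from base_lr back to min_lr
    # where min_lr = base_lr * min_lr_ratio.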
terminate_on_nan = tf.keras.callbacks.TerminateOnNaN()
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback(),
lr_scheduler,
terminate_on_nan]
# build logger and checkpoint saver for master GPU:
if hvd.rank() == 0:
model.summary()
logger.info("Number of images in the training dataset:\t{:>6}"
.format(train_data.n_samples))
logger.info("Number of images in the validation dataset:\t{:>6}"
.format(val_data.n_samples))
if not os.path.exists(os.path.join(results_dir, 'weights')):
os.mkdir(os.path.join(results_dir, 'weights'))
ckpt_path = os.path.join(results_dir, 'weights',
"lprnet_epoch-{epoch:03d}.hdf5")
model_checkpoint = KerasModelSaver(ckpt_path, key, ckpt_interval, last_epoch=num_epochs,
verbose=verbose)
callbacks.append(model_checkpoint)
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=num_epochs,
is_master=hvd.rank() == 0,
)
callbacks.append(status_logger)
if val_data.n_samples > 0:
eval_interval = experiment_spec.eval_config.validation_period_during_training
tf.keras.backend.set_learning_phase(0)
ac_checkpoint = ac_callback(eval_model=model_eval,
eval_interval=eval_interval,
val_dataset=val_data,
verbose=verbose)
callbacks.append(ac_checkpoint)
tf.keras.backend.set_learning_phase(1)
csv_logger = tf.keras.callbacks.CSVLogger(filename=os.path.join(results_dir,
"lprnet_training_log.csv"),
separator=",",
append=False)
callbacks.append(csv_logger)
# init early stopping:
if experiment_spec.training_config.HasField("early_stopping"):
es_config = experiment_spec.training_config.early_stopping
es_cb = tf.keras.callbacks.EarlyStopping(monitor=es_config.monitor,
min_delta=es_config.min_delta,
patience=es_config.patience,
verbose=True)
callbacks.append(es_cb)
if hvd.rank() == 0:
if experiment_spec.training_config.visualizer.enabled:
tb_log_dir = os.path.join(results_dir, "events")
tb_cb = tf.keras.callbacks.TensorBoard(tb_log_dir, write_graph=False)
callbacks.append(tb_cb)
tbimg_cb = LPRNetTensorBoardImage(tb_log_dir,
experiment_spec.training_config.visualizer.num_images)
fetches = [tf.assign(tbimg_cb.img, model.inputs[0], validate_shape=False)]
model._function_kwargs = {'fetches': fetches}
callbacks.append(tbimg_cb)
train_steps = int(ceil(train_data.n_samples / batch_size_per_gpu) // hvd.size())
model.fit(x=train_data,
steps_per_epoch=train_steps,
epochs=num_epochs,
callbacks=callbacks,
workers=4,
use_multiprocessing=False,
initial_epoch=init_epoch - 1,
verbose=verbose
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
            description='Train an LPRNet model.'
)
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
        help='Path to spec file. Absolute path or relative to working directory.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
default="",
type=str,
required=False,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
'-m',
'--resume_model_weights',
type=str,
default=None,
help='Path to a model to continue training.'
)
parser.add_argument(
'--initial_epoch',
type=int,
default=1,
        help='Epoch number to resume training from.'
)
return parser
def parse_command_line_arguments(args=None):
"""
Parse command-line flags passed to the training script.
Returns:
Namespace with all parsed arguments.
"""
parser = build_command_line_parser()
return parser.parse_args(args)
def main(args=None):
"""Run the training process."""
args = parse_command_line_arguments(args)
try:
run_experiment(config_path=args.experiment_spec_file,
results_dir=args.results_dir,
resume_weights=args.resume_model_weights,
init_epoch=args.initial_epoch,
key=args.key)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone inference script for LPRNet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import cv2
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.lprnet.models import eval_builder
from nvidia_tao_tf1.cv.lprnet.utils.ctc_decoder import decode_ctc_conf
from nvidia_tao_tf1.cv.lprnet.utils.img_utils import preprocess
from nvidia_tao_tf1.cv.lprnet.utils.model_io import load_model
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import (
INFERENCE_EXP_REQUIRED_MSG,
load_experiment_spec,
spec_validator
)
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
description='LPRNet Inference Tool'
)
parser.add_argument('-m',
'--model_path',
type=str,
required=True,
help='Path to a TLT model or a TensorRT engine')
parser.add_argument('-i',
'--image_dir',
required=True,
type=str,
help='The path to input image or directory.')
parser.add_argument('-k',
'--key',
default="",
type=str,
                        help='Key to save or load a .tlt model. Must be present if -m is a TLT model.')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Path to an experiment spec file for training.')
parser.add_argument('--trt',
action='store_true',
help='Use TensorRT engine for inference.')
parser.add_argument('-r',
'--results_dir',
type=str,
help='Path to a folder where the logs are stored.')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
'''Parse command line arguments.'''
parser = build_command_line_parser()
return parser.parse_args(args)
def inference(arguments):
    '''Run inference.'''
results_dir = arguments.results_dir
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
log_dir = results_dir
else:
log_dir = os.path.dirname(arguments.model_path)
status_file = os.path.join(log_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting LPRNet Inference."
)
config_path = arguments.experiment_spec
experiment_spec = load_experiment_spec(config_path)
spec_validator(experiment_spec, INFERENCE_EXP_REQUIRED_MSG)
characters_list_file = experiment_spec.dataset_config.characters_list_file
with open(characters_list_file, "r") as f:
temp_list = f.readlines()
classes = [i.strip() for i in temp_list]
blank_id = len(classes)
output_width = experiment_spec.augmentation_config.output_width
output_height = experiment_spec.augmentation_config.output_height
output_channel = experiment_spec.augmentation_config.output_channel
batch_size = experiment_spec.eval_config.batch_size
input_shape = (batch_size, output_channel, output_height, output_width)
if os.path.splitext(arguments.model_path)[1] in ['.tlt', '.hdf5']:
tf.keras.backend.clear_session() # Clear previous models from memory.
tf.keras.backend.set_learning_phase(0)
model = load_model(model_path=arguments.model_path,
max_label_length=experiment_spec.lpr_config.max_label_length,
key=arguments.key)
# Build evaluation model
model = eval_builder.build(model)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
elif arguments.trt:
from nvidia_tao_tf1.cv.common.inferencer.trt_inferencer import TRTInferencer
trt_inf = TRTInferencer(arguments.model_path, input_shape)
print("Using TRT engine for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
print("Unsupported model type: {}".format(os.path.splitext(arguments.model_path)[1]))
sys.exit()
image_file_list = [os.path.join(arguments.image_dir, file_name)
for file_name in os.listdir(arguments.image_dir)]
    # Only full batches are processed; any remainder images are skipped.
    batch_cnt = int(len(image_file_list) / batch_size)
for idx in range(batch_cnt):
# prepare data:
batch_image_list = image_file_list[idx * batch_size: (idx + 1) * batch_size]
batch_images = [cv2.imread(image_file) for image_file in batch_image_list]
batch_images = preprocess(batch_images,
output_width=output_width,
output_height=output_height,
is_training=False)
# predict:
if arguments.trt:
prediction = trt_inf.infer_batch(batch_images)
else:
prediction = model.predict(x=batch_images, batch_size=batch_size)
# decode prediction
decoded_lp, _ = decode_ctc_conf(prediction,
classes=classes,
blank_id=blank_id)
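        # Conceptual sketch of the decoding step (the actual implementation
        # lives in nvidia_tao_tf1.cv.lprnet.utils.ctc_decoder): greedy CTC
        # decoding takes the best class per time step, collapses consecutive
        # repeats, then drops the blank symbol. For example, with blank id B,
        # the per-frame sequence [3, 3, B, 7, 7, B, 3] decodes to [3, 7, 3].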
        for image_name, lp in zip(batch_image_list, decoded_lp):
            print("{}:{} ".format(image_name, lp))
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
def main(args=None):
"""Run the inference process."""
try:
args = parse_command_line(args)
inference(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Stand-alone evaluate script for LPRNet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import tensorflow as tf
from tqdm import trange
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.lprnet.dataloader.data_sequence import LPRNetDataGenerator
from nvidia_tao_tf1.cv.lprnet.models import eval_builder
from nvidia_tao_tf1.cv.lprnet.utils.ctc_decoder import decode_ctc_conf
from nvidia_tao_tf1.cv.lprnet.utils.model_io import load_model
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import (
EVAL_EXP_REQUIRED_MSG,
load_experiment_spec,
spec_validator
)
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(
            description='Evaluate an LPRNet model.'
)
parser.add_argument('-m',
'--model_path',
                        help='Path to a TLT model or TensorRT engine.',
required=True,
type=str)
parser.add_argument('-k',
'--key',
default="",
type=str,
help='Key to save or load a .tlt model.')
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Experiment spec file for training and evaluation.')
parser.add_argument('--trt',
action='store_true',
help='Use TensorRT engine for evaluation.')
parser.add_argument('-r',
'--results_dir',
type=str,
help='Path to a folder where the logs are stored.')
parser.add_argument('-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS)
return parser
def parse_command_line(args):
'''Parse command line arguments.'''
parser = build_command_line_parser()
return parser.parse_args(args)
def evaluate(arguments):
    '''Run evaluation.'''
results_dir = arguments.results_dir
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
log_dir = results_dir
else:
log_dir = os.path.dirname(arguments.model_path)
status_file = os.path.join(log_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting LPRNet evaluation."
)
config_path = arguments.experiment_spec
experiment_spec = load_experiment_spec(config_path)
spec_validator(experiment_spec, EVAL_EXP_REQUIRED_MSG)
characters_list_file = experiment_spec.dataset_config.characters_list_file
with open(characters_list_file, "r") as f:
temp_list = f.readlines()
classes = [i.strip() for i in temp_list]
blank_id = len(classes)
output_width = experiment_spec.augmentation_config.output_width
output_height = experiment_spec.augmentation_config.output_height
output_channel = experiment_spec.augmentation_config.output_channel
batch_size = experiment_spec.eval_config.batch_size
input_shape = (batch_size, output_channel, output_height, output_width)
if os.path.splitext(arguments.model_path)[1] in ['.tlt', '.hdf5']:
tf.keras.backend.clear_session() # Clear previous models from memory.
tf.keras.backend.set_learning_phase(0)
model = load_model(model_path=arguments.model_path,
max_label_length=experiment_spec.lpr_config.max_label_length,
key=arguments.key)
# Build evaluation model
model = eval_builder.build(model)
print("Using TLT model for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
elif arguments.trt:
from nvidia_tao_tf1.cv.common.inferencer.trt_inferencer import TRTInferencer
trt_inf = TRTInferencer(arguments.model_path, input_shape)
print("Using TRT engine for inference, setting batch size to the one in eval_config:",
experiment_spec.eval_config.batch_size)
else:
print("Unsupported model type: {}".format(os.path.splitext(arguments.model_path)[1]))
sys.exit()
val_data = LPRNetDataGenerator(experiment_spec=experiment_spec,
is_training=False,
shuffle=False)
tr = trange(len(val_data), file=sys.stdout)
tr.set_description('Producing predictions')
total_cnt = val_data.n_samples
correct = 0
for idx in tr:
# prepare data:
batch_x, batch_y = val_data[idx]
# predict:
if arguments.trt:
prediction = trt_inf.infer_batch(batch_x)
else:
prediction = model.predict(x=batch_x, batch_size=batch_size)
# decode prediction
decoded_lp, _ = decode_ctc_conf(prediction,
classes=classes,
blank_id=blank_id)
        for pred_lp, gt_lp in zip(decoded_lp, batch_y):
            if pred_lp == gt_lp:
                correct += 1
    acc = float(correct) / float(total_cnt)
    print("Accuracy: {} / {} {}".format(correct, total_cnt, acc))
s_logger.kpi.update({'Accuracy': acc})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
def main(args=None):
"""Run the evaluation process."""
try:
args = parse_command_line(args)
evaluate(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.lprnet.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.lprnet.scripts, "lprnet", sys.argv[1:])
if __name__ == "__main__":
main()
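# Example CLI usage (illustrative; the flags follow the argparse definitions in
# nvidia_tao_tf1/cv/lprnet/scripts):
#   lprnet train -e /path/to/spec.txt -r /path/to/results -k <key>
#   lprnet evaluate -m /path/to/model.tlt -e /path/to/spec.txt -k <key>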
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/entrypoint/lprnet.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Tools to convert datasets into .tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/entrypoint/__init__.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/experiment_specs/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained LPRNet model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tensorflow as tf
os.environ["TF_KERAS"] = "1"
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx # noqa
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter # noqa
from nvidia_tao_tf1.cv.lprnet.models import eval_builder # noqa
from nvidia_tao_tf1.cv.lprnet.utils.model_io import load_model # noqa
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import load_experiment_spec, spec_validator, EXPORT_EXP_REQUIRED_MSG # noqa
logger = logging.getLogger(__name__)
class LPRNetExporter(Exporter):
"""Exporter class to export a trained LPRNet model."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
experiment_spec_path="",
backend="onnx",
**kwargs):
"""Instantiate the LPRNet exporter to export a trained LPRNet .tlt model.
Args:
model_path(str): Path to the LPRNet model file.
key (str): Key to decode the model.
data_type (str): Backend data-type for the optimized TensorRT engine.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
experiment_spec_path (str): Path to LPRNet experiment spec file.
backend (str): Type of intermediate backend parser to be instantiated.
"""
super(LPRNetExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
**kwargs)
self.experiment_spec_path = experiment_spec_path
assert os.path.isfile(self.experiment_spec_path), \
"Experiment spec file not found at {}.".format(self.experiment_spec_path)
self.experiment_spec = None
def load_model(self, backend="onnx"):
"""Simple function to load the LPRNet Keras model."""
experiment_spec = load_experiment_spec(self.experiment_spec_path)
spec_validator(experiment_spec, EXPORT_EXP_REQUIRED_MSG)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
tf.keras.backend.clear_session() # Clear previous models from memory.
tf.keras.backend.set_learning_phase(0)
model = load_model(model_path=self.model_path,
max_label_length=experiment_spec.lpr_config.max_label_length,
key=self.key)
# Build evaluation model
model = eval_builder.build(model)
self.experiment_spec = experiment_spec
return model
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
        This routine converts a keras model to an intermediate model file based
        on the backend the exporter was initialized with; only the onnx backend
        is currently implemented.
        Args:
            model (keras.model.Model): Decoded keras model to be exported.
            output_file_name (str): Path to the output file.
        Returns:
            output_file_name (str): Path to the exported model file.
"""
if self.backend == "onnx":
keras_to_onnx(
model,
output_file_name,
target_opset=self.target_opset)
return output_file_name
raise NotImplementedError("Invalid backend provided. {}".format(self.backend))
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["tf_op_layer_ArgMax", "tf_op_layer_Max"]
self.input_node_names = ["image_input"]
def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
"""Set data pre-processing parameters for the int8 calibration."""
num_channels = input_dims[0]
if num_channels == 3:
means = [0, 0, 0]
elif num_channels == 1:
means = [0]
else:
raise NotImplementedError("Invalid number of dimensions {}.".format(num_channels))
self.preprocessing_arguments = {"scale": 1.0 / 255.0,
"means": means,
"flip_channel": True}
def get_input_dims_from_model(self, model=None):
"""Read input dimensions from the model.
Args:
model (keras.models.Model): Model to get input dimensions from.
Returns:
input_dims (tuple): Input dimensions.
"""
if model is None:
raise IOError("Invalid model object.")
input_dims = model.layers[1].input_shape[1:]
return input_dims
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/export/lprnet_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test lprnet export."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import pytest
import tensorflow as tf
import nvidia_tao_tf1.cv.lprnet.models.model_builder as model_builder
from nvidia_tao_tf1.cv.lprnet.utils.model_io import save_model
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import load_experiment_spec
backbone_configs = [('baseline', 10, "fp32"),
('baseline', 18, "fp16")]
@pytest.fixture
def _spec_file():
'''default spec file.'''
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
return os.path.join(parent_dir, 'experiment_specs/default_spec.txt')
@pytest.fixture
def spec():
experiment_spec = load_experiment_spec(merge_from_default=True)
label = "abcdefg"
with open("tmp_ch_list.txt", "w") as f:
for ch in label:
f.write(ch + "\n")
experiment_spec.dataset_config.characters_list_file = "tmp_ch_list.txt"
yield experiment_spec
os.remove("tmp_ch_list.txt")
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("model_type, nlayers, data_type",
backbone_configs)
def test_export(script_runner, spec, _spec_file, model_type,
nlayers, data_type):
spec.lpr_config.arch = model_type
spec.lpr_config.nlayers = nlayers
# pin GPU ID 0 so it uses the newest GPU ARCH for INT8
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
enc_key = 'nvidia_tlt'
tf.keras.backend.clear_session()
model, _, _ = model_builder.build(spec)
os_handle, tmp_keras_model = tempfile.mkstemp(suffix=".hdf5")
os.close(os_handle)
save_model(model, tmp_keras_model, enc_key.encode())
os_handle, tmp_exported_model = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
os.remove(tmp_exported_model)
tf.reset_default_graph()
del model
# export to etlt model
script = 'nvidia_tao_tf1/cv/lprnet/scripts/export.py'
env = os.environ.copy()
# 1. export in FP32 mode
if data_type == "fp32":
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model]
ret = script_runner.run(script, env=env, *args)
        # Clean up the created temp files before aborting if an assertion fails.
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
except AssertionError:
# if the script runner failed, the tmp_exported_model may not be created at all
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
os.remove(tmp_keras_model)
            raise AssertionError(ret.stdout + ret.stderr)
# 2. export in FP16 mode
if data_type == "fp16":
args = ['-m', tmp_keras_model,
'-k', enc_key,
'--experiment_spec', _spec_file,
'-o', tmp_exported_model,
'--data_type', 'fp16']
ret = script_runner.run(script, env=env, *args)
try:
assert ret.success
assert os.path.isfile(tmp_exported_model)
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
except AssertionError:
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
os.remove(tmp_keras_model)
            raise AssertionError(ret.stdout + ret.stderr)
# clear the tmp files
if os.path.exists(tmp_exported_model):
os.remove(tmp_exported_model)
if os.path.exists(tmp_keras_model):
os.remove(tmp_keras_model)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/export/tests/test_export.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/dataloader/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT LPRNet data sequence."""
import logging
import math
import os
import random
import cv2
import numpy as np
from tensorflow.compat.v1.keras.utils import Sequence
from nvidia_tao_tf1.cv.lprnet.utils.img_utils import preprocess
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
DEFAULT_STRIDE = 4
class LPRNetDataGenerator(Sequence):
"""Data generator for license plate dataset."""
def __init__(self, experiment_spec, is_training=True, shuffle=True, time_step=None):
"""initialize data generator."""
self.image_paths = []
self.label_paths = []
if is_training:
data_sources = experiment_spec.dataset_config.data_sources
self.batch_size = experiment_spec.training_config.batch_size_per_gpu
else:
data_sources = experiment_spec.dataset_config.validation_data_sources
self.batch_size = experiment_spec.eval_config.batch_size
for data_source in data_sources:
self._add_source(data_source)
self.data_inds = np.arange(len(self.image_paths))
self.is_training = is_training
self.n_samples = len(self.image_paths)
self.output_width = experiment_spec.augmentation_config.output_width
self.output_height = experiment_spec.augmentation_config.output_height
self.output_channel = experiment_spec.augmentation_config.output_channel
self.keep_original_prob = experiment_spec.augmentation_config.keep_original_prob
self.max_rotate_degree = experiment_spec.augmentation_config.max_rotate_degree
self.rotate_prob = experiment_spec.augmentation_config.rotate_prob
self.gaussian_kernel_size = list(experiment_spec.augmentation_config.gaussian_kernel_size)
self.blur_prob = experiment_spec.augmentation_config.blur_prob
self.reverse_color_prob = experiment_spec.augmentation_config.reverse_color_prob
# Load the characters list:
characters_list_file = experiment_spec.dataset_config.characters_list_file
with open(characters_list_file, "r") as f:
temp_list = f.readlines()
classes = [i.strip() for i in temp_list]
self.class_dict = {classes[index]: index for index in range(len(classes))}
self.classes = classes
self.time_step = time_step
self.max_label_length = experiment_spec.lpr_config.max_label_length
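        # CTC needs at least (2 * max_label_length + 1) time steps so that a
        # blank can separate repeated characters in the worst case. With the
        # default backbone stride of 4, the input width must therefore be at
        # least (2 * max_label_length + 1) * 4; e.g. max_label_length = 8
        # gives (2 * 8 + 1) * 4 = 68 pixels.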
suggest_width = (self.max_label_length * 2 + 1) * DEFAULT_STRIDE
if self.output_width < suggest_width:
logger.info("To avoid NaN loss, " +
"please set the output_width >= {}. ".format(suggest_width) +
"And then restart the training.")
exit()
self.shuffle = shuffle
self.on_epoch_end()
def _add_source(self, data_source):
"""Add image/label paths."""
img_files = os.listdir(data_source.image_directory_path)
label_files = set(os.listdir(data_source.label_directory_path))
supported_img_format = ['.jpg', '.jpeg', '.png', '.bmp', '.gif']
for img_file in img_files:
file_name, img_ext = os.path.splitext(img_file)
if img_ext in supported_img_format and file_name + ".txt" in label_files:
self.image_paths.append(os.path.join(data_source.image_directory_path,
img_file))
self.label_paths.append(os.path.join(data_source.label_directory_path,
file_name+".txt"))
def __len__(self):
"""return number of batches in dataset."""
return int(math.ceil(len(self.image_paths)/self.batch_size))
def __getitem__(self, idx):
"""preprare processed data for training and evaluation."""
begin_id = idx*self.batch_size
end_id = min(len(self.data_inds), (idx+1)*self.batch_size)
batch_x_file_list = [self.image_paths[i] for i in
self.data_inds[begin_id:end_id]]
batch_y_file_list = [self.label_paths[i] for i in
self.data_inds[begin_id:end_id]]
read_flag = cv2.IMREAD_COLOR
if self.output_channel == 1:
read_flag = cv2.IMREAD_GRAYSCALE
batch_x = [np.array(cv2.imread(file_name, read_flag), dtype=np.float32)
for file_name in batch_x_file_list]
# preprocess the image batch
batch_x = preprocess(batch_x,
is_training=self.is_training,
output_width=self.output_width,
output_height=self.output_height,
output_channel=self.output_channel,
keep_original_prob=self.keep_original_prob,
max_rotate_degree=self.max_rotate_degree,
rotate_prob=self.rotate_prob,
gaussian_kernel_size=self.gaussian_kernel_size,
blur_prob=self.blur_prob,
reverse_color_prob=self.reverse_color_prob)
        # prepare sequence labels
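        # Each training target is packed as a single vector:
        #   [class ids padded to max_label_length, time_step, label length]
        # For example (illustrative ids), with max_label_length = 8 and
        # time_step = 24, a 7-character plate mapping to ids [3, 17, 0, 33, 4, 2, 9]
        # becomes [3, 17, 0, 33, 4, 2, 9, 0, 24, 7], so the CTC loss wrapper can
        # recover the input and label lengths from the last two entries.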
if self.is_training:
batch_y = []
batch_input_length = []
batch_label_length = []
for file_name in batch_y_file_list:
with open(file_name, "r") as f:
label_line = f.readline().strip()
label = np.array([self.class_dict[char] for char in label_line])
batch_input_length.append(self.time_step)
batch_label_length.append(len(label))
batch_y.append(np.pad(label, (0, self.max_label_length - len(label))))
batch_y = np.array(batch_y)
batch_input_length = np.array(batch_input_length)
batch_input_length = batch_input_length[:, np.newaxis]
batch_label_length = np.array(batch_label_length)
batch_label_length = batch_label_length[:, np.newaxis]
batch_final_label = np.concatenate((batch_y, batch_input_length, batch_label_length),
axis=-1)
else:
batch_y = []
for file_name in batch_y_file_list:
with open(file_name, "r") as f:
label = f.readline().strip()
batch_y.append(label)
batch_final_label = batch_y
return batch_x, batch_final_label
def on_epoch_end(self):
"""shuffle the dataset on epoch end."""
        if self.shuffle:
random.shuffle(self.data_inds)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/dataloader/data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test lprnet keras sequence dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from PIL import Image
import pytest
from nvidia_tao_tf1.cv.lprnet.dataloader.data_sequence import LPRNetDataGenerator
from nvidia_tao_tf1.cv.lprnet.utils.spec_loader import load_experiment_spec
@pytest.fixture
def _test_experiment_spec():
img = np.random.randint(low=0, high=255, size=(52, 105, 3), dtype=np.uint8)
gt = "3H0X429"
experiment_spec = load_experiment_spec(merge_from_default=True)
if not os.path.exists("tmp_labels/"):
os.mkdir("tmp_labels/")
with open("tmp_labels/0.txt", "w") as f:
f.write(gt)
if not os.path.exists("tmp_imgs/"):
os.mkdir("tmp_imgs/")
tmp_im = Image.fromarray(img)
tmp_im.save("tmp_imgs/0.jpg")
with open("tmp_ch_list_data.txt", "w") as f:
for ch in gt:
f.write(ch + "\n")
experiment_spec.dataset_config.data_sources[0].label_directory_path = "tmp_labels/"
experiment_spec.dataset_config.data_sources[0].image_directory_path = "tmp_imgs/"
experiment_spec.dataset_config.validation_data_sources[0].label_directory_path = "tmp_labels/"
experiment_spec.dataset_config.validation_data_sources[0].image_directory_path = "tmp_imgs/"
experiment_spec.dataset_config.characters_list_file = "tmp_ch_list_data.txt"
experiment_spec.training_config.batch_size_per_gpu = 1
yield experiment_spec
shutil.rmtree("tmp_labels")
shutil.rmtree("tmp_imgs")
os.remove("tmp_ch_list_data.txt")
def test_data_sequence(_test_experiment_spec):
train_data = LPRNetDataGenerator(experiment_spec=_test_experiment_spec,
is_training=True,
shuffle=True,
time_step=24)
val_data = LPRNetDataGenerator(experiment_spec=_test_experiment_spec,
is_training=False,
shuffle=True,
time_step=24)
assert len(train_data) == 1
train_im, train_label = train_data[0]
val_im, val_label = val_data[0]
assert train_label[0].shape[-1] == 10
assert len(val_label[0]) == 7
assert train_im[0].shape == (3, 48, 96)
assert val_im[0].shape == (3, 48, 96)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/lprnet/dataloader/tests/test_data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO SSD root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/__init__.py |
"""SSD entry point."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('ssd', 'nvidia_tao_tf1/cv/ssd/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/docker/ssd.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/layers/__init__.py |
|
'''
A custom Keras layer to generate anchor boxes.
Copyright (C) 2019 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import keras.backend as K
from keras.engine.topology import InputSpec, Layer
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.utils.box_utils import np_convert_coordinates
class AnchorBoxes(Layer):
'''
AnchorBoxes layer.
    This is a Keras custom layer for SSD. The code is from GitHub, under the Apache-2.0 license. Link:
https://github.com/pierluigiferrari/ssd_keras/tree/3ac9adaf3889f1020d74b0eeefea281d5e82f353
A Keras layer to create an output tensor containing anchor box coordinates
and variances based on the input tensor and the passed arguments.
A set of 2D anchor boxes of different aspect ratios is created for each spatial unit of
the input tensor. The number of anchor boxes created per unit depends on the arguments
`aspect_ratios` and `two_boxes_for_ar1`, in the default case it is 4. The boxes
are parameterized by the coordinate tuple `(xmin, xmax, ymin, ymax)`.
The logic implemented by this layer is identical to the logic in the module
`ssd_box_encode_decode_utils.py`.
The purpose of having this layer in the network is to make the model self-sufficient
at inference time. Since the model is predicting offsets to the anchor boxes
(rather than predicting absolute box coordinates directly), one needs to know the anchor
box coordinates in order to construct the final prediction boxes from the predicted offsets.
If the model's output tensor did not contain the anchor box coordinates, the necessary
information to convert the predicted offsets back to absolute coordinates would be missing
in the model output. The reason why it is necessary to predict offsets to the anchor boxes
rather than to predict absolute box coordinates directly is explained in `README.md`.
Input shape:
4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
Output shape:
        4D tensor of shape `(batch, height * width, n_boxes, 8)`. The last axis contains
the four anchor box coordinates and the four variance values for each box.
'''
def __init__(self,
img_height,
img_width,
this_scale,
next_scale,
aspect_ratios=None,
two_boxes_for_ar1=True,
this_steps=None,
this_offsets=None,
clip_boxes=False,
variances=None,
**kwargs):
'''
init function.
All arguments need to be set to the same values as in the box encoding process,
otherwise the behavior is undefined. Some of these arguments are explained in
more detail in the documentation of the `SSDBoxEncoder` class.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
this_scale (float): A float in [0, 1], the scaling factor for the size of
the generated anchor boxes as a fraction of the shorter side of the input image.
next_scale (float): A float in [0, 1], the next larger scaling factor. Only
                relevant if `self.two_boxes_for_ar1 == True`.
aspect_ratios (list, optional): The list of aspect ratios for which default
boxes are to be generated for this layer.
two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1.
If `True`, two default boxes will be generated for aspect ratio 1. The first
will be generated using the scaling factor for the respective layer, the second
one will be generated using geometric mean of said scaling factor and next bigger
scaling factor.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within
image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for
each coordinate will be divided by its respective variance value.
'''
if K.backend() != 'tensorflow':
            raise TypeError("This layer only supports the TensorFlow backend, but you are "
                            "using the {} backend.".format(K.backend()))
if (this_scale < 0) or (next_scale < 0) or (this_scale > 1):
raise ValueError("`this_scale` must be in [0, 1] and `next_scale` must be >0, \
but `this_scale` == {}, `next_scale` == {}"
.format(this_scale, next_scale))
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received."
.format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}"
.format(variances))
self.img_height = img_height
self.img_width = img_width
self.this_scale = this_scale
self.next_scale = next_scale
self.aspect_ratios = aspect_ratios
self.two_boxes_for_ar1 = two_boxes_for_ar1
self.this_steps = this_steps
self.this_offsets = this_offsets
self.clip_boxes = clip_boxes
self.variances = variances
# Compute the number of boxes per cell
if (1 in aspect_ratios) and two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios) + 1
else:
self.n_boxes = len(aspect_ratios)
super(AnchorBoxes, self).__init__(**kwargs)
def build(self, input_shape):
"""Layer build function."""
self.input_spec = [InputSpec(shape=input_shape)]
size = min(self.img_height, self.img_width)
        # Compute the box widths and heights for all aspect ratios
wh_list = []
for ar in self.aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = self.this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean.
box_height = box_width = np.sqrt(self.this_scale * self.next_scale) * size
wh_list.append((box_width, box_height))
else:
box_height = self.this_scale * size / np.sqrt(ar)
box_width = self.this_scale * size * np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
_, _, feature_map_height, feature_map_width = input_shape
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes
if (self.this_steps is None):
step_height = self.img_height / feature_map_height
step_width = self.img_width / feature_map_width
else:
if isinstance(self.this_steps, (list, tuple)) and (len(self.this_steps) == 2):
step_height = self.this_steps[0]
step_width = self.this_steps[1]
elif isinstance(self.this_steps, (int, float)):
step_height = self.this_steps
step_width = self.this_steps
# Compute the offsets.
if (self.this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(self.this_offsets, (list, tuple)) and (len(self.this_offsets) == 2):
offset_height = self.this_offsets[0]
offset_width = self.this_offsets[1]
elif isinstance(self.this_offsets, (int, float)):
offset_height = self.this_offsets
offset_width = self.this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height,
(offset_height + feature_map_height - 1) * step_height, feature_map_height)
cx = np.linspace(offset_width * step_width,
(offset_width + feature_map_width - 1) * step_width, feature_map_width)
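        # Worked example: for a 300x300 input and a 3x3 feature map with no
        # explicit steps/offsets, step = 100 and offset = 0.5, so the anchor
        # centers fall at 50, 150 and 250 along each axis.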
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile()
cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile()
# Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
        # Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = np_convert_coordinates(boxes_tensor, start_index=0,
conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:, :, :, [0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:, :, :, [0, 2]] = x_coords
y_coords = boxes_tensor[:, :, :, [1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:, :, :, [1, 3]] = y_coords
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
# AnchorBox layer will output `(xmin,ymin,xmax,ymax)`. The ground truth is
# `(cx,cy,logw,logh)`. However, we don't need to further convert to centroids here since
        # this layer will not carry any gradient backprop. The following command will do the
        # conversion if we eventually want it.
# boxes_tensor = np_convert_coordinates(boxes_tensor,
# start_index=0, conversion='corners2centroids')
# Create a tensor to contain the variances and append it to `boxes_tensor`.
# This tensor has the same shape as `boxes_tensor` and simply contains the same
# 4 variance values for every position in the last axis.
variances_tensor = np.zeros_like(boxes_tensor) # `(height, width, n_boxes, 4)`
variances_tensor += self.variances # Long live broadcasting
# Now `boxes_tensor` becomes a tensor of shape `(height, width, n_boxes, 8)`
boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1)
        # Reshape to 3D `(feature_map_height * feature_map_width, n_boxes, 8)`;
        # the batch dimension added below makes it 4D.
boxes_tensor = boxes_tensor.reshape((-1, self.n_boxes, 8))
# Now prepend one dimension to `boxes_tensor` to account for the batch size and tile it.
        # The result will be a 4D tensor of shape `(batch_size, height * width, n_boxes, 8)`.
boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
self.boxes_tensor = K.constant(boxes_tensor, dtype='float32')
        # shape: (1, feature_map_height * feature_map_width, n_boxes, 8)
super(AnchorBoxes, self).build(input_shape)
def call(self, x, mask=None):
'''
Return an anchor box tensor based on the shape of the input tensor.
Note that this tensor does not participate in any graph computations at runtime.
It is being created as a constant once during graph creation and is just being
output along with the rest of the model output during runtime. Because of this,
all logic is implemented as Numpy array operations and it is sufficient to convert
the resulting Numpy array into a Keras tensor at the very end before outputting it.
Arguments:
x (tensor): 4D tensor of shape `(batch, channels, height, width)` if
`dim_ordering = 'th'` or `(batch, height, width, channels)` if
`dim_ordering = 'tf'`. The input for this layer must be the output
of the localization predictor layer.
'''
# Compute box width and height for each aspect ratio
# The shorter side of the image will be used to compute `w` and `h`.
box_tensor_dup = tf.identity(self.boxes_tensor)
with tf.name_scope(None, 'FirstDimTile'):
x_dup = tf.identity(x)
boxes_tensor = K.tile(box_tensor_dup, (K.shape(x_dup)[0], 1, 1, 1))
return boxes_tensor
def compute_output_shape(self, input_shape):
'''Layer output shape function.'''
batch_size, _, feature_map_height, feature_map_width = input_shape
return (batch_size, feature_map_height*feature_map_width, self.n_boxes, 8)
def get_config(self):
'''Layer get_config function.'''
config = {
'img_height': self.img_height,
'img_width': self.img_width,
'this_scale': self.this_scale,
'next_scale': self.next_scale,
'aspect_ratios': list(self.aspect_ratios),
'two_boxes_for_ar1': self.two_boxes_for_ar1,
'clip_boxes': self.clip_boxes,
'variances': list(self.variances)
}
base_config = super(AnchorBoxes, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
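# Minimal usage sketch (illustrative, not part of the original module). It
# assumes a channels-first input, matching `compute_output_shape` above. With
# aspect ratios [1.0, 2.0] and `two_boxes_for_ar1=True`, n_boxes = 3, so a
# 3x3 feature map yields an anchor tensor of shape (batch, 9, 3, 8).
if __name__ == '__main__':
    from keras.layers import Input
    from keras.models import Model

    feature_map = Input(shape=(256, 3, 3))  # (channels, height, width)
    anchors = AnchorBoxes(img_height=300, img_width=300,
                          this_scale=0.2, next_scale=0.37,
                          aspect_ratios=[1.0, 2.0], two_boxes_for_ar1=True,
                          variances=[0.1, 0.1, 0.2, 0.2])(feature_map)
    Model(feature_map, anchors).summary()  # final output: (None, 9, 3, 8)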
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/layers/anchor_box_layer.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/proto/__init__.py |
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/ssd/proto/augmentation_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/ssd/proto/augmentation_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_tf1/cv/ssd/proto/augmentation_config.proto\"\xcd\x03\n\x12\x41ugmentationConfig\x12\x14\n\x0coutput_width\x18\x01 \x01(\x05\x12\x15\n\routput_height\x18\x02 \x01(\x05\x12\x16\n\x0eoutput_channel\x18\x03 \x01(\x05\x12\x1d\n\x15random_crop_min_scale\x18\x04 \x01(\x02\x12\x1d\n\x15random_crop_max_scale\x18\x05 \x01(\x02\x12\x1a\n\x12random_crop_min_ar\x18\x06 \x01(\x02\x12\x1a\n\x12random_crop_max_ar\x18\x07 \x01(\x02\x12\x1a\n\x12zoom_out_min_scale\x18\x08 \x01(\x02\x12\x1a\n\x12zoom_out_max_scale\x18\t \x01(\x02\x12\x12\n\nbrightness\x18\n \x01(\x05\x12\x10\n\x08\x63ontrast\x18\x0b \x01(\x02\x12\x12\n\nsaturation\x18\x0c \x01(\x02\x12\x0b\n\x03hue\x18\r \x01(\x05\x12\x13\n\x0brandom_flip\x18\x0e \x01(\x02\x12\x36\n\nimage_mean\x18\x0f \x03(\x0b\x32\".AugmentationConfig.ImageMeanEntry\x1a\x30\n\x0eImageMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY = _descriptor.Descriptor(
name='ImageMeanEntry',
full_name='AugmentationConfig.ImageMeanEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='AugmentationConfig.ImageMeanEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='AugmentationConfig.ImageMeanEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=471,
serialized_end=519,
)
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
name='AugmentationConfig',
full_name='AugmentationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output_width', full_name='AugmentationConfig.output_width', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_height', full_name='AugmentationConfig.output_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_channel', full_name='AugmentationConfig.output_channel', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_crop_min_scale', full_name='AugmentationConfig.random_crop_min_scale', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_crop_max_scale', full_name='AugmentationConfig.random_crop_max_scale', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_crop_min_ar', full_name='AugmentationConfig.random_crop_min_ar', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_crop_max_ar', full_name='AugmentationConfig.random_crop_max_ar', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zoom_out_min_scale', full_name='AugmentationConfig.zoom_out_min_scale', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zoom_out_max_scale', full_name='AugmentationConfig.zoom_out_max_scale', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='brightness', full_name='AugmentationConfig.brightness', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contrast', full_name='AugmentationConfig.contrast', index=10,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='saturation', full_name='AugmentationConfig.saturation', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hue', full_name='AugmentationConfig.hue', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_flip', full_name='AugmentationConfig.random_flip', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_mean', full_name='AugmentationConfig.image_mean', index=14,
number=15, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_AUGMENTATIONCONFIG_IMAGEMEANENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=519,
)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG.fields_by_name['image_mean'].message_type = _AUGMENTATIONCONFIG_IMAGEMEANENTRY
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
ImageMeanEntry = _reflection.GeneratedProtocolMessageType('ImageMeanEntry', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_IMAGEMEANENTRY,
__module__ = 'nvidia_tao_tf1.cv.ssd.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.ImageMeanEntry)
))
,
DESCRIPTOR = _AUGMENTATIONCONFIG,
__module__ = 'nvidia_tao_tf1.cv.ssd.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig)
))
_sym_db.RegisterMessage(AugmentationConfig)
_sym_db.RegisterMessage(AugmentationConfig.ImageMeanEntry)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
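if __name__ == '__main__':
    # Minimal sketch, not part of the generated module: build a config and
    # round-trip it through protobuf text format. Field values are
    # illustrative only.
    from google.protobuf import text_format
    cfg = AugmentationConfig()
    cfg.output_width = 300
    cfg.output_height = 300
    cfg.output_channel = 3
    cfg.image_mean['b'] = 103.939  # map<string, float> field
    print(text_format.MessageToString(cfg))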
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/ssd/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.ssd.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_tf1.cv.common.proto import detection_sequence_dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2
from nvidia_tao_tf1.cv.common.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.common.proto import nms_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2
from nvidia_tao_tf1.cv.ssd.proto import eval_config_pb2 as nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_eval__config__pb2
from nvidia_tao_tf1.cv.ssd.proto import ssd_config_pb2 as nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_ssd__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/ssd/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n,nvidia_tao_tf1/cv/ssd/proto/experiment.proto\x1a\x35nvidia_tao_tf1/cv/ssd/proto/augmentation_config.proto\x1a\x46nvidia_tao_tf1/cv/common/proto/detection_sequence_dataset_config.proto\x1a\x34nvidia_tao_tf1/cv/common/proto/training_config.proto\x1a/nvidia_tao_tf1/cv/common/proto/nms_config.proto\x1a-nvidia_tao_tf1/cv/ssd/proto/eval_config.proto\x1a,nvidia_tao_tf1/cv/ssd/proto/ssd_config.proto\"\xb7\x02\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12&\n\x0e\x64\x61taset_config\x18\x02 \x01(\x0b\x32\x0e.DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x03 \x01(\x0b\x32\x13.AugmentationConfig\x12(\n\x0ftraining_config\x18\x04 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x05 \x01(\x0b\x32\x0b.EvalConfig\x12\x1e\n\nnms_config\x18\x06 \x01(\x0b\x32\n.NMSConfig\x12 \n\nssd_config\x18\x07 \x01(\x0b\x32\n.SSDConfigH\x00\x12!\n\x0b\x64ssd_config\x18\x08 \x01(\x0b\x32\n.SSDConfigH\x00\x42\t\n\x07networkb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_ssd__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_config', full_name='Experiment.nms_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ssd_config', full_name='Experiment.ssd_config', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dssd_config', full_name='Experiment.dssd_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='network', full_name='Experiment.network',
index=0, containing_type=None, fields=[]),
],
serialized_start=372,
serialized_end=683,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['nms_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2._NMSCONFIG
_EXPERIMENT.fields_by_name['ssd_config'].message_type = nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_ssd__config__pb2._SSDCONFIG
_EXPERIMENT.fields_by_name['dssd_config'].message_type = nvidia__tao__tf1_dot_cv_dot_ssd_dot_proto_dot_ssd__config__pb2._SSDCONFIG
_EXPERIMENT.oneofs_by_name['network'].fields.append(
_EXPERIMENT.fields_by_name['ssd_config'])
_EXPERIMENT.fields_by_name['ssd_config'].containing_oneof = _EXPERIMENT.oneofs_by_name['network']
_EXPERIMENT.oneofs_by_name['network'].fields.append(
_EXPERIMENT.fields_by_name['dssd_config'])
_EXPERIMENT.fields_by_name['dssd_config'].containing_oneof = _EXPERIMENT.oneofs_by_name['network']
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.ssd.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
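if __name__ == '__main__':
    # Minimal sketch, not part of the generated module: `ssd_config` and
    # `dssd_config` live in the `network` oneof, so setting one clears the
    # other.
    exp = Experiment()
    exp.ssd_config.arch = 'resnet'
    exp.dssd_config.arch = 'resnet'  # replaces ssd_config in the oneof
    assert exp.WhichOneof('network') == 'dssd_config'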
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/ssd/proto/ssd_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/ssd/proto/ssd_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n,nvidia_tao_tf1/cv/ssd/proto/ssd_config.proto\"\xa6\x03\n\tSSDConfig\x12\x15\n\raspect_ratios\x18\x01 \x01(\t\x12\x1c\n\x14\x61spect_ratios_global\x18\x02 \x01(\t\x12\x0e\n\x06scales\x18\x03 \x01(\t\x12\x11\n\tmin_scale\x18\x04 \x01(\x02\x12\x11\n\tmax_scale\x18\x05 \x01(\x02\x12\x19\n\x11two_boxes_for_ar1\x18\x06 \x01(\x08\x12\r\n\x05steps\x18\x07 \x01(\t\x12\x12\n\nclip_boxes\x18\x08 \x01(\x08\x12\x11\n\tvariances\x18\t \x01(\t\x12\x0f\n\x07offsets\x18\n \x01(\t\x12\x12\n\nmean_color\x18\x0b \x01(\t\x12\x0c\n\x04\x61rch\x18\x0c \x01(\t\x12\x0f\n\x07nlayers\x18\r \x01(\r\x12\x19\n\x11pred_num_channels\x18\x0e \x01(\r\x12\r\n\x05\x61lpha\x18\x0f \x01(\x02\x12\x15\n\rneg_pos_ratio\x18\x10 \x01(\x02\x12\x16\n\x0epos_iou_thresh\x18\x11 \x01(\x02\x12\x16\n\x0eneg_iou_thresh\x18\x14 \x01(\x02\x12\x15\n\rfreeze_blocks\x18\x12 \x03(\r\x12\x11\n\tfreeze_bn\x18\x13 \x01(\x08\x62\x06proto3')
)
_SSDCONFIG = _descriptor.Descriptor(
name='SSDConfig',
full_name='SSDConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='SSDConfig.aspect_ratios', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aspect_ratios_global', full_name='SSDConfig.aspect_ratios_global', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scales', full_name='SSDConfig.scales', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_scale', full_name='SSDConfig.min_scale', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_scale', full_name='SSDConfig.max_scale', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='two_boxes_for_ar1', full_name='SSDConfig.two_boxes_for_ar1', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='SSDConfig.steps', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clip_boxes', full_name='SSDConfig.clip_boxes', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='variances', full_name='SSDConfig.variances', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offsets', full_name='SSDConfig.offsets', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_color', full_name='SSDConfig.mean_color', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='SSDConfig.arch', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nlayers', full_name='SSDConfig.nlayers', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pred_num_channels', full_name='SSDConfig.pred_num_channels', index=13,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alpha', full_name='SSDConfig.alpha', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='neg_pos_ratio', full_name='SSDConfig.neg_pos_ratio', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pos_iou_thresh', full_name='SSDConfig.pos_iou_thresh', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='neg_iou_thresh', full_name='SSDConfig.neg_iou_thresh', index=17,
number=20, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='SSDConfig.freeze_blocks', index=18,
number=18, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='SSDConfig.freeze_bn', index=19,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=49,
serialized_end=471,
)
DESCRIPTOR.message_types_by_name['SSDConfig'] = _SSDCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SSDConfig = _reflection.GeneratedProtocolMessageType('SSDConfig', (_message.Message,), dict(
DESCRIPTOR = _SSDCONFIG,
__module__ = 'nvidia_tao_tf1.cv.ssd.proto.ssd_config_pb2'
# @@protoc_insertion_point(class_scope:SSDConfig)
))
_sym_db.RegisterMessage(SSDConfig)
# @@protoc_insertion_point(module_scope)
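if __name__ == '__main__':
    # Minimal sketch, not part of the generated module: list-like
    # hyperparameters such as `scales` and `variances` are plain strings in
    # the proto and are parsed downstream (see utils.spec_loader.eval_str).
    cfg = SSDConfig()
    cfg.scales = '[0.05, 0.1, 0.25, 0.4, 0.55, 0.7, 0.85]'
    cfg.variances = '[0.1, 0.1, 0.2, 0.2]'
    cfg.freeze_blocks.extend([0, 1])  # repeated uint32 field
    print(cfg)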
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/proto/ssd_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/ssd/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/ssd/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n-nvidia_tao_tf1/cv/ssd/proto/eval_config.proto\"\xe2\x01\n\nEvalConfig\x12)\n!validation_period_during_training\x18\x01 \x01(\r\x12\x33\n\x16\x61verage_precision_mode\x18\x02 \x01(\x0e\x32\x13.EvalConfig.AP_MODE\x12\x12\n\nbatch_size\x18\x03 \x01(\r\x12\x1e\n\x16matching_iou_threshold\x18\x04 \x01(\x02\x12\x1a\n\x12visualize_pr_curve\x18\x05 \x01(\x08\"$\n\x07\x41P_MODE\x12\n\n\x06SAMPLE\x10\x00\x12\r\n\tINTEGRATE\x10\x01\x62\x06proto3')
)
_EVALCONFIG_AP_MODE = _descriptor.EnumDescriptor(
name='AP_MODE',
full_name='EvalConfig.AP_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SAMPLE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGRATE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=240,
serialized_end=276,
)
_sym_db.RegisterEnumDescriptor(_EVALCONFIG_AP_MODE)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='validation_period_during_training', full_name='EvalConfig.validation_period_during_training', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_precision_mode', full_name='EvalConfig.average_precision_mode', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvalConfig.batch_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='matching_iou_threshold', full_name='EvalConfig.matching_iou_threshold', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualize_pr_curve', full_name='EvalConfig.visualize_pr_curve', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_EVALCONFIG_AP_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=276,
)
_EVALCONFIG.fields_by_name['average_precision_mode'].enum_type = _EVALCONFIG_AP_MODE
_EVALCONFIG_AP_MODE.containing_type = _EVALCONFIG
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_tf1.cv.ssd.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
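if __name__ == '__main__':
    # Minimal sketch, not part of the generated module: the nested AP_MODE
    # enum is exposed as class-level constants on EvalConfig.
    ec = EvalConfig()
    ec.average_precision_mode = EvalConfig.INTEGRATE
    ec.matching_iou_threshold = 0.5
    print(EvalConfig.AP_MODE.Name(ec.average_precision_mode))  # 'INTEGRATE'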
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/proto/eval_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''DALI pipeline for SSD.'''
import glob
from math import ceil
import os
import nvidia.dali.fn as fn
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.plugin.tf as dali_tf
import nvidia.dali.tfrecord as tfrec
import nvidia.dali.types as types
import tensorflow as tf
ssd_features = {'frame/id': tfrec.FixedLenFeature((), tfrec.string, ""),
'frame/width': tfrec.FixedLenFeature([1], tfrec.int64, -1),
'frame/height': tfrec.FixedLenFeature([1], tfrec.int64, -1),
'frame/encoded': tfrec.FixedLenFeature((), tfrec.string, ""),
'target/object_class_id': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/observation_angle': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/occlusion': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/truncation': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/coordinates_x1': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/coordinates_y1': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/coordinates_x2': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/coordinates_y2': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/world_bbox_h': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/world_bbox_w': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/world_bbox_l': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/world_bbox_x': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/world_bbox_y': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/world_bbox_z': tfrec.VarLenFeature(tfrec.float32, 0.0),
'target/world_bbox_rot_y': tfrec.VarLenFeature(tfrec.float32, 0.0)}
def create_ssd_training_pipeline(experiment_spec, local_rank, shard_id, num_shards):
"""Create DALI pipeline for SSD."""
batch_size = experiment_spec.training_config.batch_size_per_gpu
n_workers = experiment_spec.training_config.n_workers or 4
dataset_config = experiment_spec.dataset_config
augmentation_config = experiment_spec.augmentation_config
# Configure the augmentation
output_width = augmentation_config.output_width
output_height = augmentation_config.output_height
output_channel = augmentation_config.output_channel
min_ar = augmentation_config.random_crop_min_ar or 0.5
max_ar = augmentation_config.random_crop_max_ar or 2.0
min_scale = augmentation_config.random_crop_min_scale or 0.3
max_scale = augmentation_config.random_crop_max_scale or 1.0
zoom_out_prob = 0.5
min_zoom_out = int(augmentation_config.zoom_out_min_scale) or 1
max_zoom_out = int(augmentation_config.zoom_out_max_scale) or 4
s_delta = augmentation_config.saturation
c_delta = augmentation_config.contrast
    # brightness is specified in the 0-255 range; convert it to a 0-1
    # multiplicative delta for the color augmentation below
    b_delta = int(augmentation_config.brightness)
    b_delta /= 256
h_delta = int(augmentation_config.hue)
random_flip = augmentation_config.random_flip
img_mean = augmentation_config.image_mean
# get the tfrecord list from pattern
data_sources = dataset_config.data_sources
tfrecord_path_list = []
for data_source in data_sources:
tfrecord_pattern = str(data_source.tfrecords_path)
tf_file_list = sorted(glob.glob(tfrecord_pattern))
tfrecord_path_list.extend(tf_file_list)
# create index for tfrecords
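    # DALI needs one index file per tfrecord; these are expected to be
    # generated offline, e.g. with the tfrecord2idx script that ships
    # with DALI:  tfrecord2idx <tfrecord_file> idx-<tfrecord_file>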
idx_path_list = []
total_sample_cnt = 0
for tfrecord_path in tfrecord_path_list:
root_path, tfrecord_file = os.path.split(tfrecord_path)
idx_path = os.path.join(root_path, "idx-"+tfrecord_file)
if not os.path.exists(idx_path):
raise ValueError("The index file {} for {} does not exist.".format(idx_path,
tfrecord_path))
else:
with open(idx_path, "r") as f:
total_sample_cnt += len(f.readlines())
idx_path_list.append(idx_path)
# create ssd augmentation pipeline
pipe = Pipeline(batch_size=batch_size,
num_threads=n_workers,
device_id=local_rank)
with pipe:
inputs = fn.readers.tfrecord(
path=tfrecord_path_list,
index_path=idx_path_list,
features=ssd_features,
num_shards=num_shards,
shard_id=shard_id
)
images = inputs['frame/encoded']
        # @TODO(tylerz): the hsv op requires the RGB color space, so decode
        # to RGB here; channels are converted to BGR further down before
        # feeding the model.
images = fn.decoders.image(
images,
device="mixed",
output_type=types.RGB
)
x1 = inputs['target/coordinates_x1']
y1 = inputs['target/coordinates_y1']
x2 = inputs['target/coordinates_x2']
y2 = inputs['target/coordinates_y2']
bboxes = fn.stack(x1, y1, x2, y2, axis=1)
class_ids = fn.cast(inputs['target/object_class_id'],
dtype=types.INT32)
# random expand:
zoom_flip_coin = fn.random.coin_flip(probability=zoom_out_prob)
ratio = 1 + zoom_flip_coin * fn.random.uniform(range=[min_zoom_out-1, max_zoom_out-1])
paste_x = fn.random.uniform(range=[0, 1])
paste_y = fn.random.uniform(range=[0, 1])
images = fn.paste(images, ratio=ratio, paste_x=paste_x, paste_y=paste_y,
fill_value=(123.68, 116.779, 103.939))
bboxes = fn.bbox_paste(bboxes, ratio=ratio, paste_x=paste_x, paste_y=paste_y,
ltrb=True)
# random bbox crop:
crop_begin, crop_size, bboxes, class_ids = \
fn.random_bbox_crop(bboxes, class_ids,
device="cpu",
aspect_ratio=[min_ar, max_ar],
thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
scaling=[min_scale, max_scale],
bbox_layout="xyXY",
allow_no_crop=True,
num_attempts=50)
# image slice according to cropped bboxes
images = fn.slice(images, crop_begin, crop_size,
out_of_bounds_policy="trim_to_shape")
# resize
images = fn.resize(images,
resize_x=output_width,
resize_y=output_height,
min_filter=types.DALIInterpType.INTERP_LINEAR,
antialias=False)
# color augmentation:
# - saturation
# - contrast
# - brightness
# - hue
if s_delta > 0:
random_saturation = fn.random.uniform(range=[1 - s_delta, 1 + s_delta])
else:
random_saturation = 1
if c_delta > 0:
random_contrast = fn.random.uniform(range=[1 - c_delta, 1 + c_delta])
else:
random_contrast = 1
if b_delta > 0:
random_brightness = fn.random.uniform(range=[1 - b_delta, 1 + b_delta])
else:
random_brightness = 1
if h_delta > 0:
random_hue = fn.random.uniform(range=[0, h_delta])
else:
random_hue = 0
images = fn.hsv(images,
dtype=types.FLOAT,
hue=random_hue,
saturation=random_saturation)
images = fn.brightness_contrast(images,
contrast_center=128, # input is in float, but in 0..255
dtype=types.UINT8,
brightness=random_brightness,
contrast=random_contrast)
# RGB -> BGR
images = fn.color_space_conversion(images, image_type=types.RGB, output_type=types.BGR)
        # flip the image and bboxes
horizontal_flip_coin = fn.random.coin_flip(probability=random_flip)
bboxes = fn.bb_flip(bboxes, ltrb=True, horizontal=horizontal_flip_coin)
# crop_mirror_normalize
# mean=[0.485 * 255, 0.456 * 255, 0.406 * 255] is for RGB
# std=[0.229 * 255, 0.224 * 255, 0.225 * 255], is for RGB
if output_channel == 3:
mean = [0.406 * 255, 0.456 * 255, 0.485 * 255]
elif output_channel == 1:
mean = [117.3786, 117.3786, 117.3786]
if img_mean:
if output_channel == 3:
mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
mean = [img_mean['l'], img_mean['l'], img_mean['l']]
if output_channel == 3:
images = fn.crop_mirror_normalize(images,
mean=mean,
mirror=horizontal_flip_coin,
dtype=types.FLOAT,
output_layout="CHW",
pad_output=False)
elif output_channel == 1:
images = fn.crop_mirror_normalize(images,
mean=mean,
mirror=horizontal_flip_coin,
dtype=types.UINT8,
output_layout="HWC",
pad_output=False)
images = fn.color_space_conversion(images, image_type=types.BGR, output_type=types.GRAY)
images = fn.transpose(images, perm=[2, 0, 1], output_layout="CHW")
images = fn.cast(images, dtype=types.FLOAT)
        # @TODO(tylerz): encode bbox labels using fn.box_encoder()
# # encode bbox with anchors
# # - bboxes shape [#boxes, 4]
# # - class_ids shape [#boxes, ]
# bboxes, class_ids = fn.box_encoder(bboxes, class_ids,
# criteria=0.5,
# anchors=default_boxes)
# # generate one-hot class vector
# class_ids = fn.one_hot(class_ids,
# dtype=types.FLOAT,
# num_classes=num_classes)
# # generate final label #class + bbox
# labels = fn.cat(class_ids, bboxes, axis=1)
class_ids = fn.cast(class_ids,
dtype=types.FLOAT)
class_ids = fn.pad(class_ids, fill_value=-1)
bboxes = fn.pad(bboxes)
pipe.set_outputs(images, class_ids, bboxes)
image_shape = (batch_size, output_channel, output_height, output_width)
return pipe, total_sample_cnt, image_shape
class SSDDALIDataset():
"""SSD dataset using DALI. Only works for train dataset now."""
def __init__(self, experiment_spec, device_id, shard_id, num_shards):
"""
Init the SSD DALI dataset.
Arguments:
experiment_spec: experiment spec for training.
device_id: local rank.
shard_id: rank.
num_shards: number of gpus.
"""
pipe, total_sample_cnt, image_shape = \
create_ssd_training_pipeline(experiment_spec,
device_id,
shard_id,
num_shards)
self.n_samples = total_sample_cnt
self.image_shape = image_shape
self.batch_size = image_shape[0]
daliop = dali_tf.DALIIterator()
self.images, self.class_ids, self.bboxes = daliop(
pipeline=pipe,
shapes=[self.image_shape, (), ()],
dtypes=[tf.float32,
tf.float32,
tf.float32]
)
self._post_process_label()
def _post_process_label(self):
"""Generate final labels for encoder."""
labels = []
for batch_idx in range(self.batch_size):
cls_in_batch = tf.expand_dims(self.class_ids[batch_idx], axis=-1)
bboxes_in_batch = self.bboxes[batch_idx]
mask = tf.greater_equal(cls_in_batch, 0)
mask.set_shape([None])
label = tf.concat([cls_in_batch, bboxes_in_batch], axis=-1)
label = tf.boolean_mask(label, mask)
label = tf.reshape(label, (-1, 5))
labels.append(label)
self.labels = labels
def set_encoder(self, encoder):
"""Set the label encoder for ground truth."""
self.labels = encoder(self.labels)
def __len__(self):
"""Return the total sample number."""
return int(ceil(self.n_samples / self.batch_size))
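# --- Usage sketch (not part of the original file) ---
# Single-GPU wiring of the DALI dataset; `spec` is assumed to be a parsed
# Experiment proto whose data sources carry a tfrecords_path, and the
# encoder is whatever dataset_builder attaches for training.
#
# dataset = SSDDALIDataset(experiment_spec=spec, device_id=0,
#                          shard_id=0, num_shards=1)
# dataset.set_encoder(ssd_input_encoder_tf)
# with tf.Session() as sess:
#     images, labels = sess.run([dataset.images, dataset.labels])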
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/dalipipeline_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for training or inference.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras.backend as K
from nvidia_tao_tf1.cv.ssd.architecture.ssd_arch import ssd
from nvidia_tao_tf1.cv.ssd.utils.model_io import load_model
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import eval_str
def build(experiment_spec,
is_dssd,
input_tensor=None,
kernel_regularizer=None):
'''
Build a model for training with or without training tensors.
For inference, this function can be used to build a base model, which can be passed into
eval_builder to attach a decode layer.
'''
img_channels = experiment_spec.augmentation_config.output_channel
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
cls_mapping = experiment_spec.dataset_config.target_class_mapping
classes = sorted({str(x) for x in cls_mapping.values()})
# n_classes + 1 for background class
n_classes = len(classes) + 1
scales = eval_str(experiment_spec.ssd_config.scales)
aspect_ratios_global = eval_str(
experiment_spec.ssd_config.aspect_ratios_global)
aspect_ratios_per_layer = eval_str(
experiment_spec.ssd_config.aspect_ratios)
steps = eval_str(experiment_spec.ssd_config.steps)
offsets = eval_str(experiment_spec.ssd_config.offsets)
variances = eval_str(experiment_spec.ssd_config.variances)
freeze_blocks = eval_str(experiment_spec.ssd_config.freeze_blocks)
freeze_bn = eval_str(experiment_spec.ssd_config.freeze_bn)
# original_learning_phase = K.learning_phase()
# set learning to be 1 for generating train graph
model_train = ssd(image_size=(img_channels, img_height, img_width),
n_classes=n_classes,
is_dssd=is_dssd,
nlayers=experiment_spec.ssd_config.nlayers,
pred_num_channels=experiment_spec.ssd_config.pred_num_channels,
kernel_regularizer=kernel_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
scales=scales,
min_scale=experiment_spec.ssd_config.min_scale,
max_scale=experiment_spec.ssd_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=experiment_spec.ssd_config.two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=experiment_spec.ssd_config.clip_boxes,
variances=variances,
arch=experiment_spec.ssd_config.arch,
input_tensor=input_tensor,
qat=experiment_spec.training_config.enable_qat)
if experiment_spec.training_config.enable_qat:
# Save a temp model to avoid multiple calls to create_quantized_keras_model
_, temp_model_path = tempfile.mkstemp(suffix='.hdf5')
model_train.save(temp_model_path)
# Set learning to be 0 for generating eval graph
K.set_learning_phase(0)
model_eval = load_model(temp_model_path, experiment_spec,
is_dssd)
os.remove(temp_model_path)
else:
K.set_learning_phase(0)
model_eval = ssd(image_size=(img_channels, img_height, img_width),
n_classes=n_classes,
is_dssd=is_dssd,
nlayers=experiment_spec.ssd_config.nlayers,
pred_num_channels=experiment_spec.ssd_config.pred_num_channels,
kernel_regularizer=kernel_regularizer,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
scales=scales,
min_scale=experiment_spec.ssd_config.min_scale,
max_scale=experiment_spec.ssd_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=experiment_spec.ssd_config.two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=experiment_spec.ssd_config.clip_boxes,
variances=variances,
arch=experiment_spec.ssd_config.arch,
input_tensor=None,
qat=experiment_spec.training_config.enable_qat)
    # TODO(tylerz): hard-code the learning phase to 1 since build() is only
    # called during training
K.set_learning_phase(1)
return model_train, model_eval
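# --- Usage sketch (not part of the original file) ---
# `spec` is assumed to be a parsed Experiment proto; the regularizer below
# is illustrative.
#
# from keras.regularizers import l2
# model_train, model_eval = build(spec, is_dssd=False,
#                                 kernel_regularizer=l2(1e-5))
# model_train.summary()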
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/model_builder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build train dataset and val dataset.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from nvidia_tao_tf1.cv.ssd.architecture.ssd_arch import ssd
from nvidia_tao_tf1.cv.ssd.box_coder.input_encoder import SSDInputEncoder
from nvidia_tao_tf1.cv.ssd.box_coder.ssd_input_encoder import SSDInputEncoderNP
from nvidia_tao_tf1.cv.ssd.builders.dalipipeline_builder import SSDDALIDataset
from nvidia_tao_tf1.cv.ssd.builders.data_sequence import SSDDataSequence
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import eval_str
def build_dataset(experiment_spec,
is_dssd,
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
device_id=0,
shard_id=0,
num_shards=1):
"""Build the DALI based dataset or Keras sequence based dataset."""
img_channels = experiment_spec.augmentation_config.output_channel
img_height = experiment_spec.augmentation_config.output_height
img_width = experiment_spec.augmentation_config.output_width
cls_mapping = experiment_spec.dataset_config.target_class_mapping
classes = sorted({str(x) for x in cls_mapping.values()})
# n_classes + 1 for background class
n_classes = len(classes) + 1
scales = eval_str(experiment_spec.ssd_config.scales)
aspect_ratios_global = eval_str(
experiment_spec.ssd_config.aspect_ratios_global)
aspect_ratios_per_layer = eval_str(
experiment_spec.ssd_config.aspect_ratios)
steps = eval_str(experiment_spec.ssd_config.steps)
offsets = eval_str(experiment_spec.ssd_config.offsets)
variances = eval_str(experiment_spec.ssd_config.variances)
freeze_blocks = eval_str(experiment_spec.ssd_config.freeze_blocks)
freeze_bn = eval_str(experiment_spec.ssd_config.freeze_bn)
# Use a fake model to get predictor's size
model_fake = ssd(image_size=(img_channels, img_height, img_width),
n_classes=n_classes,
is_dssd=is_dssd,
nlayers=experiment_spec.ssd_config.nlayers,
pred_num_channels=experiment_spec.ssd_config.pred_num_channels,
kernel_regularizer=None,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
scales=scales,
min_scale=experiment_spec.ssd_config.min_scale,
max_scale=experiment_spec.ssd_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=experiment_spec.ssd_config.two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=experiment_spec.ssd_config.clip_boxes,
variances=variances,
arch=experiment_spec.ssd_config.arch,
input_tensor=None,
qat=False)
predictor_sizes = [model_fake.get_layer('ssd_conf_0').output_shape[2:],
model_fake.get_layer('ssd_conf_1').output_shape[2:],
model_fake.get_layer('ssd_conf_2').output_shape[2:],
model_fake.get_layer('ssd_conf_3').output_shape[2:],
model_fake.get_layer('ssd_conf_4').output_shape[2:],
model_fake.get_layer('ssd_conf_5').output_shape[2:]]
use_dali = False
    # @TODO(tylerz): use DALI whenever a tfrecords path is provided.
if experiment_spec.dataset_config.data_sources[0].tfrecords_path != "":
use_dali = True
print("Using DALI augmentation pipeline.")
if use_dali:
train_dataset = SSDDALIDataset(experiment_spec=experiment_spec,
device_id=device_id,
shard_id=shard_id,
num_shards=num_shards)
else:
train_dataset = \
SSDDataSequence(dataset_config=experiment_spec.dataset_config,
augmentation_config=experiment_spec.augmentation_config,
batch_size=experiment_spec.training_config.batch_size_per_gpu,
is_training=True)
if experiment_spec.ssd_config.pos_iou_thresh != 0.0:
pos_iou_threshold = experiment_spec.ssd_config.pos_iou_thresh
else:
pos_iou_threshold = 0.5
if experiment_spec.ssd_config.neg_iou_thresh != 0.0:
neg_iou_limit = experiment_spec.ssd_config.neg_iou_thresh
else:
neg_iou_limit = 0.5
ssd_input_encoder = \
SSDInputEncoderNP(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
min_scale=experiment_spec.ssd_config.min_scale,
max_scale=experiment_spec.ssd_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=experiment_spec.ssd_config.two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=experiment_spec.ssd_config.clip_boxes,
variances=variances,
pos_iou_threshold=pos_iou_threshold,
neg_iou_limit=neg_iou_limit)
if use_dali:
ssd_input_encoder_tf = \
SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
min_scale=experiment_spec.ssd_config.min_scale,
max_scale=experiment_spec.ssd_config.max_scale,
aspect_ratios_global=aspect_ratios_global,
aspect_ratios_per_layer=aspect_ratios_per_layer,
two_boxes_for_ar1=experiment_spec.ssd_config.two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=experiment_spec.ssd_config.clip_boxes,
variances=variances,
pos_iou_threshold=0.5,
neg_iou_limit=0.5,
gt_normalized=True)
train_encoder = ssd_input_encoder_tf
else:
train_encoder = ssd_input_encoder
train_dataset.set_encoder(train_encoder)
def eval_encode_fn(gt_label):
bboxes = gt_label[:, -4:]
cls_id = gt_label[:, 0:1]
gt_label_without_diff = np.concatenate((cls_id, bboxes), axis=-1)
return (ssd_input_encoder(gt_label_without_diff), gt_label)
val_dataset = SSDDataSequence(dataset_config=experiment_spec.dataset_config,
augmentation_config=experiment_spec.augmentation_config,
batch_size=experiment_spec.eval_config.batch_size,
is_training=False,
encode_fn=eval_encode_fn)
return train_dataset, val_dataset
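# --- Usage sketch (not part of the original file) ---
# Single-GPU call; under Horovod, device_id/shard_id/num_shards would be
# taken from hvd.local_rank(), hvd.rank() and hvd.size().
#
# train_dataset, val_dataset = build_dataset(spec, is_dssd=False,
#                                            device_id=0, shard_id=0,
#                                            num_shards=1)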
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/dataset_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for evaluation.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
from nvidia_tao_tf1.cv.ssd.box_coder.output_decoder_layer import DecodeDetections
def build(training_model,
confidence_thresh=0.05,
iou_threshold=0.5,
top_k=200,
nms_max_output_size=1000,
include_encoded_pred=False):
'''build model for evaluation.'''
im_channel, im_height, im_width = training_model.layers[0].input_shape[1:]
decoded_predictions = DecodeDetections(confidence_thresh=confidence_thresh,
iou_threshold=iou_threshold,
top_k=top_k,
nms_max_output_size=nms_max_output_size,
img_height=im_height,
img_width=im_width,
name='decoded_predictions')
if include_encoded_pred:
model_output = [training_model.layers[-1].output,
decoded_predictions(training_model.layers[-1].output)]
else:
model_output = decoded_predictions(training_model.layers[-1].output)
eval_model = Model(inputs=training_model.layers[1].input,
outputs=model_output)
new_input = Input(shape=(im_channel, im_height, im_width))
eval_model = Model(inputs=new_input, outputs=eval_model(new_input))
return eval_model
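# --- Usage sketch (not part of the original file) ---
# Threshold values below are illustrative and would normally come from the
# NMSConfig in the experiment spec.
#
# eval_model = build(model_eval, confidence_thresh=0.01, iou_threshold=0.6,
#                    top_k=200, nms_max_output_size=1000)
# decoded = eval_model.predict(image_batch)  # decoded boxes per image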
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/eval_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT SSD data sequence."""
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.detection_data_sequence import DetectionDataSequence
from nvidia_tao_tf1.cv.ssd.builders.data_generator.data_augmentation_chain_original_ssd import (
SSDDataAugmentation
)
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_geometric_ops import Resize
class SSDDataSequence(DetectionDataSequence):
"""SSD data sequence."""
def __init__(self, dataset_config, *args, **kwargs):
"""Init function."""
super().__init__(dataset_config=dataset_config, *args, **kwargs)
self.output_height = self.augmentation_config.output_height
self.output_width = self.augmentation_config.output_width
# mapping class to 1-based integer
mapping_dict = dataset_config.target_class_mapping
self.classes = sorted({str(x).lower() for x in mapping_dict.values()})
val_class_mapping = dict(
zip(self.classes, range(1, len(self.classes)+1)))
self.class_mapping = {key.lower(): val_class_mapping[str(val.lower())]
for key, val in mapping_dict.items()}
def set_encoder(self, encode_fn):
'''Set label encoder.'''
self.encode_fn = encode_fn
def _load_gt_label(self, label_path):
"""Load Kitti labels.
Returns:
[class_idx, is_difficult, x_min, y_min, x_max, y_max]
"""
        with open(label_path, 'r') as f:
            entries = f.read().strip().split('\n')
results = []
for entry in entries:
items = entry.strip().split()
if len(items) < 9:
continue
items[0] = items[0].lower()
if items[0] not in self.class_mapping:
continue
            is_difficult = 1 if int(items[2]) != 0 else 0
            label = [self.class_mapping[items[0]], is_difficult, *items[4:8]]
results.append([float(x) for x in label])
return np.array(results).reshape(-1, 6)
def _preprocessing(self, image, label, output_img_size):
        '''
        Apply SSD-style data augmentation in training.
        In the evaluation/inference phase, only resizing is performed.
        '''
# initial SSD augmentation parameters:
if self.is_training:
augmentation_func = \
SSDDataAugmentation(img_height=self.augmentation_config.output_height,
img_width=self.augmentation_config.output_width,
rc_min=self.augmentation_config.random_crop_min_scale or 0.3,
rc_max=self.augmentation_config.random_crop_max_scale or 1.0,
rc_min_ar=self.augmentation_config.random_crop_min_ar or 0.5,
rc_max_ar=self.augmentation_config.random_crop_max_ar or 2.0,
zo_min=self.augmentation_config.zoom_out_min_scale or 1.0,
zo_max=self.augmentation_config.zoom_out_max_scale or 4.0,
b_delta=self.augmentation_config.brightness,
c_delta=self.augmentation_config.contrast,
s_delta=self.augmentation_config.saturation,
h_delta=self.augmentation_config.hue,
flip_prob=self.augmentation_config.random_flip,
background=(123.68, 116.779, 103.939))
else:
augmentation_func = Resize(height=self.augmentation_config.output_height,
width=self.augmentation_config.output_width)
if self.is_training:
bboxes = label[:, -4:]
cls_id = label[:, 0:1]
label = np.concatenate((cls_id, bboxes), axis=-1)
image, label = augmentation_func(image, label)
else:
bboxes = label[:, -4:]
cls_id = label[:, 0:1]
temp_label = np.concatenate((cls_id, bboxes), axis=-1)
image, temp_label = augmentation_func(image, temp_label)
# Finalize
label[:, -4:] = temp_label[:, -4:]
label = self._filter_invalid_labels(label)
if self.encode_fn is not None:
label = self.encode_fn(label)
return image, label
def _filter_invalid_labels(self, labels):
"""filter out invalid labels.
Arg:
labels: size (N, 5) or (N, 6), where bboxes is normalized to 0~1.
Returns:
labels: size (M, 5) or (N, 6), filtered bboxes with clipped boxes.
"""
        # clip boxes to the valid image area
        # column layout: -4 = xmin, -3 = ymin, -2 = xmax, -1 = ymax
x_coords = labels[:, [-4, -2]]
x_coords = np.clip(x_coords, 0, self.output_width - 1)
labels[:, [-4, -2]] = x_coords
y_coords = labels[:, [-3, -1]]
y_coords = np.clip(y_coords, 0, self.output_height - 1)
labels[:, [-3, -1]] = y_coords
# exclude invalid boxes
x_cond = labels[:, -2] - labels[:, -4] > 1e-3
y_cond = labels[:, -1] - labels[:, -3] > 1e-3
return labels[x_cond & y_cond]
def _get_single_item(self, idx, output_img_size):
"""Load and process single image and its label."""
image = self._load_gt_image(self.image_paths[self.data_inds[idx]])
label = self._load_gt_label(self.label_paths[self.data_inds[idx]])
return self._preprocessing(image, label, output_img_size)
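# --- Usage sketch (not part of the original file) ---
# `spec` is assumed to be a parsed Experiment proto; the encoder is whatever
# dataset_builder attaches (e.g. an SSDInputEncoderNP instance).
#
# seq = SSDDataSequence(dataset_config=spec.dataset_config,
#                       augmentation_config=spec.augmentation_config,
#                       batch_size=16, is_training=True)
# seq.set_encoder(ssd_input_encoder)
# images, encoded_labels = seq[0]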
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA SSD data loader builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nvidia_tao_tf1.blocks.multi_source_loader.types.bbox_2d_label import Bbox2DLabel
import nvidia_tao_tf1.core
from nvidia_tao_tf1.cv.detectnet_v2.dataloader.build_dataloader import build_dataloader
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import get_non_empty_rows_2d_sparse
class ssd_data:
"""
Data loader class.
ssd_data can be used in two ways:
1. build groundtruth image and label TF tensors. Those two tensors can be
directly used for training.
2. build a generator that yields image and label numpy arrays. In this case,
a TF session needs to be passed into the class initializer.
"""
def __init__(self,
experiment_spec,
label_encoder=None,
training=True,
sess=None):
"""
Data loader init function.
Arguments:
experiment_spec: The loaded config pb2.
label_encoder (function, optional): If passed in, groundtruth label will be encoded.
training (bool): Return training set or validation set.
sess (TF Session): Required if generator() function needs to be called. Otherwise, just
pass None.
"""
dataset_proto = experiment_spec.dataset_config
dataloader = build_dataloader(
dataset_proto=dataset_proto,
augmentation_proto=experiment_spec.augmentation_config)
if training:
batch_size = experiment_spec.training_config.batch_size_per_gpu
else:
batch_size = experiment_spec.eval_config.batch_size
self.images, self.ground_truth_labels, self.num_samples = \
dataloader.get_dataset_tensors(batch_size, training=training,
enable_augmentation=training)
if self.num_samples == 0:
return
cls_mapping_dict = experiment_spec.dataset_config.target_class_mapping
self.classes = sorted({str(x) for x in cls_mapping_dict.values()})
cls_map = nvidia_tao_tf1.core.processors.LookupTable(keys=self.classes,
values=list(range(len(self.classes))),
default_value=-1)
cls_map.build()
self.H, self.W = self.images.get_shape().as_list()[2:]
self.label_encoder = label_encoder
# preprocess input.
self.images *= 255.0
num_channels = experiment_spec.augmentation_config.preprocessing.output_image_channel
if num_channels == 3:
perm = tf.constant([2, 1, 0])
self.images = tf.gather(self.images, perm, axis=1)
self.images -= tf.constant([[[[103.939]], [[116.779]], [[123.68]]]])
elif num_channels == 1:
self.images -= 117.3786
else:
raise NotImplementedError(
"Invalid number of channels {} requested.".format(num_channels)
)
gt_labels = []
if isinstance(self.ground_truth_labels, list):
for l in self.ground_truth_labels:
obj_id = cls_map(l['target/object_class'])
x1 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_x1']), tf.int32), 0,
self.W - 1)
x2 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_x2']), tf.int32), 0,
self.W - 1)
y1 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_y1']), tf.int32), 0,
self.H - 1)
y2 = tf.clip_by_value(tf.cast(tf.round(l['target/coordinates_y2']), tf.int32), 0,
self.H - 1)
# only select valid labels
select = tf.logical_and(tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2)))
label = tf.stack([obj_id, x1, y1, x2, y2], axis=1)
gt_labels.append(tf.boolean_mask(label, select))
elif isinstance(self.ground_truth_labels, Bbox2DLabel):
source_classes = self.ground_truth_labels.object_class
mapped_classes = tf.SparseTensor(
values=cls_map(source_classes.values),
indices=source_classes.indices,
dense_shape=source_classes.dense_shape)
mapped_labels = self.ground_truth_labels._replace(object_class=mapped_classes)
valid_indices = tf.not_equal(mapped_classes.values, -1)
filtered_labels = mapped_labels.filter(valid_indices)
filtered_obj_ids = tf.sparse.reshape(filtered_labels.object_class, [batch_size, -1, 1])
filtered_coords = tf.sparse.reshape(filtered_labels.vertices.coordinates,
[batch_size, -1, 4])
filtered_coords = tf.sparse.SparseTensor(
values=tf.cast(tf.round(filtered_coords.values), tf.int32),
indices=filtered_coords.indices,
dense_shape=filtered_coords.dense_shape)
labels_all = tf.sparse.concat(axis=-1, sp_inputs=[filtered_obj_ids, filtered_coords])
labels_split = tf.sparse.split(sp_input=labels_all, num_split=batch_size, axis=0)
labels_split = [tf.sparse.reshape(x, [-1, 5]) for x in labels_split]
labels = [tf.sparse.to_dense(get_non_empty_rows_2d_sparse(x)) for x in labels_split]
for l in labels:
obj_id = l[:, 0]
x1 = tf.clip_by_value(l[:, 1], 0, self.W - 1)
x2 = tf.clip_by_value(l[:, 3], 0, self.W - 1)
y1 = tf.clip_by_value(l[:, 2], 0, self.H - 1)
y2 = tf.clip_by_value(l[:, 4], 0, self.H - 1)
# only select valid labels
select = tf.logical_and(tf.not_equal(obj_id, -1),
tf.logical_and(tf.less(x1, x2), tf.less(y1, y2)))
label = tf.stack([obj_id, x1, y1, x2, y2], axis=1)
gt_labels.append(tf.boolean_mask(label, select))
else:
raise TypeError('Input must be either list or Bbox2DLabel instance')
self.gt_labels = gt_labels
self.ground_truth_labels = gt_labels
if self.label_encoder is not None:
self.ground_truth_labels = self.label_encoder(gt_labels)
self.sess = sess
def set_encoder(self, label_encoder):
"""Set a new label encoder for output labels."""
self.ground_truth_labels = label_encoder(self.gt_labels)
def generator(self):
"""Yields img and label numpy arrays."""
if self.sess is None:
raise ValueError('TF session can not be found. Pass a session to the initializer!')
while True:
img, label = self.sess.run([self.images, self.ground_truth_labels])
yield img, label
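

# --- Hedged usage sketch (comments only; the session setup below is an
# --- assumption, not shipped with this module). It shows the two usage modes
# --- described in the `ssd_data` docstring:
#
#   from nvidia_tao_tf1.cv.ssd.utils.spec_loader import load_experiment_spec
#   spec, _ = load_experiment_spec()  # default experiment spec, as in the unit tests
#   sess = tf.Session()
#   dataset = ssd_data(spec, label_encoder=None, training=True, sess=sess)
#   images_t, labels_t = dataset.images, dataset.ground_truth_labels  # TF tensors
#   imgs, labels = next(dataset.generator())  # numpy arrays, pulled via the session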
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/inputs_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test ssd dali dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from keras import backend as K
import numpy as np
from PIL import Image
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _bytes_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _float_feature
from nvidia_tao_tf1.cv.detectnet_v2.common.dataio.converter_lib import _int64_feature
from nvidia_tao_tf1.cv.ssd.builders.dalipipeline_builder import SSDDALIDataset
from nvidia_tao_tf1.cv.ssd.scripts.dataset_convert import create_tfrecord_idx
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import load_experiment_spec
def create_sample_example():
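    """Build a serialized tf.train.Example with one random JPEG frame and one box label."""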
example = tf.train.Example(features=tf.train.Features(feature={
'frame/id': _bytes_feature("001.jpg".encode('utf-8')),
'frame/height': _int64_feature(375),
'frame/width': _int64_feature(500),
}))
img = np.random.randint(low=0, high=255, size=(375, 500, 3), dtype=np.uint8)
tmp_im = Image.fromarray(img)
tmp_im.save("fake_img.jpg")
with open("fake_img.jpg", "rb") as f:
image_string = f.read()
f = example.features.feature
f['frame/encoded'].MergeFrom(_bytes_feature(image_string))
os.remove("fake_img.jpg")
truncation = [0]
occlusion = [0]
observation_angle = [0]
coordinates_x1 = [0.1]
coordinates_y1 = [0.1]
coordinates_x2 = [0.8]
coordinates_y2 = [0.8]
world_bbox_h = [0]
world_bbox_w = [0]
world_bbox_l = [0]
world_bbox_x = [0]
world_bbox_y = [0]
world_bbox_z = [0]
world_bbox_rot_y = [0]
object_class_ids = [0]
f['target/object_class_id'].MergeFrom(_float_feature(*object_class_ids))
f['target/truncation'].MergeFrom(_float_feature(*truncation))
f['target/occlusion'].MergeFrom(_int64_feature(*occlusion))
f['target/observation_angle'].MergeFrom(_float_feature(*observation_angle))
f['target/coordinates_x1'].MergeFrom(_float_feature(*coordinates_x1))
f['target/coordinates_y1'].MergeFrom(_float_feature(*coordinates_y1))
f['target/coordinates_x2'].MergeFrom(_float_feature(*coordinates_x2))
f['target/coordinates_y2'].MergeFrom(_float_feature(*coordinates_y2))
f['target/world_bbox_h'].MergeFrom(_float_feature(*world_bbox_h))
f['target/world_bbox_w'].MergeFrom(_float_feature(*world_bbox_w))
f['target/world_bbox_l'].MergeFrom(_float_feature(*world_bbox_l))
f['target/world_bbox_x'].MergeFrom(_float_feature(*world_bbox_x))
f['target/world_bbox_y'].MergeFrom(_float_feature(*world_bbox_y))
f['target/world_bbox_z'].MergeFrom(_float_feature(*world_bbox_z))
f['target/world_bbox_rot_y'].MergeFrom(_float_feature(*world_bbox_rot_y))
return example.SerializeToString()
@pytest.fixture
def _test_experiment_spec():
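    """Fixture: write a one-record TFRecord plus its index file and point the default spec at them."""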
serialized_example = create_sample_example()
experiment_spec, _ = load_experiment_spec()
record_path = "tmp_tfrecord"
idx_path = "idx-tmp_tfrecord"
with tf.io.TFRecordWriter(record_path) as writer:
writer.write(serialized_example)
create_tfrecord_idx(record_path, idx_path)
experiment_spec.dataset_config.data_sources[0].tfrecords_path = record_path
experiment_spec.training_config.batch_size_per_gpu = 1
yield experiment_spec
os.remove(record_path)
os.remove(idx_path)
def test_dali_dataset(_test_experiment_spec):
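    """Check that the DALI dataset yields image and label batches of the expected shapes."""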
dali_dataset = SSDDALIDataset(experiment_spec=_test_experiment_spec,
device_id=0,
shard_id=0,
num_shards=1)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(0)
sess = tf.Session(config=config)
K.set_session(sess)
imgs, tf_labels = K.get_session().run([dali_dataset.images, dali_dataset.labels])
assert imgs.shape == (1, 3, 300, 300)
assert tf_labels[0].shape == (1, 5)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/tests/test_dali_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test ssd eval builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input, Reshape
from keras.models import Model
import pytest
from nvidia_tao_tf1.cv.ssd.builders import eval_builder
@pytest.fixture
def test_model():
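    """Fixture: a toy Keras model whose output mimics encoded SSD predictions of shape (300, 16)."""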
x = Input(shape=(3, 40, 40))
y = Reshape(target_shape=(300, 16))(x)
model = Model(inputs=x, outputs=y)
return model
def test_decoded_output(test_model):
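    """The decoded output should be capped at 200 detections with 6 values each."""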
model = eval_builder.build(test_model)
assert len(model.outputs) == 1
assert model.outputs[0].shape[1] == 200
assert model.outputs[0].shape[2] == 6
def test_decoded_output_with_encoded_prediction(test_model):
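    """With `include_encoded_pred=True`, the encoded tensor is returned alongside the decoded one."""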
model = eval_builder.build(test_model,
include_encoded_pred=True)
assert len(model.outputs) == 2
assert model.outputs[0].shape[1] == 300
assert model.outputs[0].shape[2] == 16
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/tests/test_eval_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test ssd keras sequence dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from PIL import Image
import pytest
from nvidia_tao_tf1.cv.ssd.builders.data_sequence import SSDDataSequence
from nvidia_tao_tf1.cv.ssd.utils.spec_loader import load_experiment_spec
@pytest.fixture
def _test_experiment_spec():
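    """Fixture: write one fake image and its KITTI label file, and point the default spec at them."""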
img = np.random.randint(low=0, high=255, size=(375, 500, 3), dtype=np.uint8)
gt = ["bicycle 0 0 0 1 45 493 372 0 0 0 0 0 0 0",
"bicycle 0 0 0 54 24 500 326 0 0 0 0 0 0 0",
"bicycle 0 0 0 54 326 500 326 0 0 0 0 0 0 0"]
experiment_spec, _ = load_experiment_spec()
if not os.path.exists("tmp_labels/"):
os.mkdir("tmp_labels/")
with open("tmp_labels/0.txt", "w") as f:
for line in gt:
f.write(line + "\n")
if not os.path.exists("tmp_imgs/"):
os.mkdir("tmp_imgs/")
tmp_im = Image.fromarray(img)
tmp_im.save("tmp_imgs/0.jpg")
experiment_spec.dataset_config.data_sources[0].label_directory_path = "tmp_labels/"
experiment_spec.dataset_config.data_sources[0].image_directory_path = "tmp_imgs/"
experiment_spec.dataset_config.validation_data_sources[0].label_directory_path = "tmp_labels/"
experiment_spec.dataset_config.validation_data_sources[0].image_directory_path = "tmp_imgs/"
yield experiment_spec
shutil.rmtree("tmp_labels")
shutil.rmtree("tmp_imgs")
def test_data_sequence(_test_experiment_spec):
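    """Exercise training/validation sequences: label shapes, invalid-box filtering, and preprocessing."""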
# init dataloader:
train_dataset = SSDDataSequence(dataset_config=_test_experiment_spec.dataset_config,
augmentation_config=_test_experiment_spec.augmentation_config,
batch_size=1,
is_training=True,
encode_fn=None)
val_dataset = SSDDataSequence(dataset_config=_test_experiment_spec.dataset_config,
augmentation_config=_test_experiment_spec.augmentation_config,
batch_size=1,
is_training=False,
encode_fn=None)
# test load gt label for train
train_imgs, train_labels = train_dataset[0]
val_imgs, val_labels = val_dataset[0]
assert train_labels[0].shape[-1] == 5
assert val_labels[0].shape[-1] == 6
# test that the degenerate (zero-height) ground truth box is filtered out
assert val_labels[0].shape[0] == 2
# test preprocess
assert train_imgs[0].shape == (3, 300, 300)
assert val_imgs[0].shape == (3, 300, 300)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/tests/test_data_sequence.py |
'''
Includes:
* Function to compute the IoU similarity for axis-aligned, rectangular, 2D bounding boxes
* Function for coordinate conversion for axis-aligned, rectangular, 2D bounding boxes
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
def convert_coordinates(tensor, start_index, conversion, border_pixels='half'):
'''
Convert coordinates for axis-aligned 2D boxes between two coordinate formats.
Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
three supported coordinate formats that can be converted from and to each other:
1) (xmin, xmax, ymin, ymax) - the 'minmax' format
2) (xmin, ymin, xmax, ymax) - the 'corners' format
3) (cx, cy, w, h) - the 'centroids' format
Arguments:
tensor (array): A Numpy nD array containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
conversion (str): The conversion direction. Can be 'minmax2centroids',
'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners',
or 'corners2minmax'.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
Returns:
A Numpy nD array, a copy of the input tensor with the converted coordinates
in place of the original coordinates and the unaltered elements of the original
tensor elsewhere.
'''
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1
elif border_pixels == 'exclude':
d = -1
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+1]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+2] + tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind] + d # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+2] + d # Set h
elif conversion == 'centroids2minmax':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+2] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax
elif conversion == 'corners2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind+2]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+1] + tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+2] - tensor[..., ind] + d # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - tensor[..., ind+1] + d # Set h
elif conversion == 'centroids2corners':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind+1] - tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+2] = tensor[..., ind] + tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+3] = tensor[..., ind+1] + tensor[..., ind+3] / 2.0 # Set ymax
elif (conversion == 'minmax2corners') or (conversion == 'corners2minmax'):
tensor1[..., ind+1] = tensor[..., ind+2]
tensor1[..., ind+2] = tensor[..., ind+1]
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids', 'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners', and 'corners2minmax'.")
return tensor1
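

# --- Hedged worked example (an addition for illustration; the helper below is
# --- not part of the original API). A 100x50 box centered at (60, 40) round-trips
# --- between the 'centroids' and 'corners' formats with border_pixels='half'.
def _demo_convert_coordinates():
    box = np.array([[60., 40., 100., 50.]])  # (cx, cy, w, h)
    corners = convert_coordinates(box, start_index=0, conversion='centroids2corners')
    # corners == [[10., 15., 110., 65.]], i.e. (xmin, ymin, xmax, ymax)
    back = convert_coordinates(corners, start_index=0, conversion='corners2centroids')
    return corners, back  # `back` recovers the original (cx, cy, w, h) box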
def convert_coordinates2(tensor, start_index, conversion):
'''
A matrix multiplication implementation of `convert_coordinates()`.
Supports only conversion between the 'centroids' and 'minmax' formats.
This function is marginally slower on average than `convert_coordinates()`,
probably because it involves more (unnecessary) arithmetic operations (unnecessary
because the two matrices are sparse).
For details please refer to the documentation of `convert_coordinates()`.
'''
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
M = np.array([[0.5, 0. , -1., 0.],
[0.5, 0. , 1., 0.],
[0. , 0.5, 0., -1.],
[0. , 0.5, 0., 1.]])
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
elif conversion == 'centroids2minmax':
M = np.array([[ 1. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 1. ],
[-0.5, 0.5, 0. , 0. ],
[ 0. , 0. , -0.5, 0.5]]) # The multiplicative inverse of the matrix above
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids' and 'centroids2minmax'.")
return tensor1
def intersection_area(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
'''
Computes the intersection areas of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
of the `mode` argument for details.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.
If `mode` is set to 'element-wise', the shape must be broadcast-compatible with `boxes2`.
boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
If `mode` is set to 'element-wise', the shape must be broadcast-compatible with `boxes1`.
coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
`(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format
`(xmin, ymin, xmax, ymax)`.
mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an
`(m,n)` matrix with the intersection areas for all possible combinations of the `m` boxes in `boxes1` with the
`n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2`
must be broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of
length `m` where the i-th position contains the intersection area of `boxes1[i]` with `boxes2[i]`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
Returns:
A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values with
the intersection areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(boxes1.shape[1], boxes2.shape[1]))
if not mode in {'outer_product', 'element-wise'}: raise ValueError("`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode))
# Convert the coordinates if necessary.
if coords == 'centroids':
boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners')
boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners')
coords = 'corners'
elif not (coords in {'minmax', 'corners'}):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
# Compute the intersection areas.
if mode == 'outer_product':
# For all possible box combinations, get the greater xmin and ymin values.
# This is a tensor of shape (m,n,2).
min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:,[xmin,ymin]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:,[xmin,ymin]], axis=0), reps=(m, 1, 1)))
# For all possible box combinations, get the smaller xmax and ymax values.
# This is a tensor of shape (m,n,2).
max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:,[xmax,ymax]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:,[xmax,ymax]], axis=0), reps=(m, 1, 1)))
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:,:,0] * side_lengths[:,:,1]
elif mode == 'element-wise':
min_xy = np.maximum(boxes1[:,[xmin,ymin]], boxes2[:,[xmin,ymin]])
max_xy = np.minimum(boxes1[:,[xmax,ymax]], boxes2[:,[xmax,ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:,0] * side_lengths[:,1]
def intersection_area_(boxes1, boxes2, coords='corners', mode='outer_product', border_pixels='half'):
'''
The same as 'intersection_area()' but for internal use, i.e. without all the safety checks.
'''
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
# Compute the intersection areas.
if mode == 'outer_product':
# For all possible box combinations, get the greater xmin and ymin values.
# This is a tensor of shape (m,n,2).
min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:,[xmin,ymin]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:,[xmin,ymin]], axis=0), reps=(m, 1, 1)))
# For all possible box combinations, get the smaller xmax and ymax values.
# This is a tensor of shape (m,n,2).
max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:,[xmax,ymax]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:,[xmax,ymax]], axis=0), reps=(m, 1, 1)))
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:,:,0] * side_lengths[:,:,1]
elif mode == 'element-wise':
min_xy = np.maximum(boxes1[:,[xmin,ymin]], boxes2[:,[xmin,ymin]])
max_xy = np.minimum(boxes1[:,[xmax,ymax]], boxes2[:,[xmax,ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:,0] * side_lengths[:,1]
def iou(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
'''
Computes the intersection-over-union similarity (also known as Jaccard similarity)
of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
In 'outer_product' mode, returns an `(m,n)` matrix with the IoUs for all possible
combinations of the boxes in `boxes1` and `boxes2`.
In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
of the `mode` argument for details.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.
If `mode` is set to 'element-wise', the shape must be broadcast-compatible with `boxes2`.
boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
If `mode` is set to 'element-wise', the shape must be broadcast-compatible with `boxes1`.
coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
`(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format
`(xmin, ymin, xmax, ymax)`.
mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an
`(m,n)` matrix with the IoU overlaps for all possible combinations of the `m` boxes in `boxes1` with the
`n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2`
must be broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of
length `m` where the i-th position contains the IoU overlap of `boxes1[i]` with `boxes2[i]`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
Returns:
A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values in [0,1],
the Jaccard similarity of the boxes in `boxes1` and `boxes2`. 0 means there is no overlap between two given
boxes, 1 means their coordinates are identical.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(boxes1.shape[1], boxes2.shape[1]))
if not mode in {'outer_product', 'element-wise'}: raise ValueError("`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode))
# Convert the coordinates if necessary.
if coords == 'centroids':
boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners')
boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners')
coords = 'corners'
elif not (coords in {'minmax', 'corners'}):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
# Compute the IoU.
# Compute the intersection areas.
intersection_areas = intersection_area_(boxes1, boxes2, coords=coords, mode=mode)
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Compute the union areas.
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
if mode == 'outer_product':
boxes1_areas = np.tile(np.expand_dims((boxes1[:,xmax] - boxes1[:,xmin] + d) * (boxes1[:,ymax] - boxes1[:,ymin] + d), axis=1), reps=(1,n))
boxes2_areas = np.tile(np.expand_dims((boxes2[:,xmax] - boxes2[:,xmin] + d) * (boxes2[:,ymax] - boxes2[:,ymin] + d), axis=0), reps=(m,1))
elif mode == 'element-wise':
boxes1_areas = (boxes1[:,xmax] - boxes1[:,xmin] + d) * (boxes1[:,ymax] - boxes1[:,ymin] + d)
boxes2_areas = (boxes2[:,xmax] - boxes2[:,xmin] + d) * (boxes2[:,ymax] - boxes2[:,ymin] + d)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
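

# --- Hedged sanity check (an addition for illustration; this __main__ block is
# --- not part of the original module). Two 10x10 squares offset by 5 pixels
# --- share half their area, so with border_pixels='half' the IoU is 50/150 = 1/3.
if __name__ == '__main__':
    a = np.array([[0., 0., 10., 10.]])  # 10x10 square in 'corners' format
    b = np.array([[5., 0., 15., 10.]])  # the same square shifted right by 5
    print(iou(a, b, coords='corners', mode='element-wise'))  # -> [0.33333333]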
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/bounding_box_utils.py |
'''
Utilities for 2D object detection related to answering the following questions:
1. Given an image size and bounding boxes, which bounding boxes meet certain
requirements with respect to the image size?
2. Given an image size and bounding boxes, is an image of that size valid with
respect to the bounding boxes according to certain requirements?
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
from nvidia_tao_tf1.cv.ssd.builders.data_generator.bounding_box_utils import iou
class BoundGenerator:
'''
Generates pairs of floating point values that represent lower and upper bounds
from a given sample space.
'''
def __init__(self,
sample_space=((0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None),
(None, None)),
weights=None):
'''
Arguments:
sample_space (list or tuple): A list, tuple, or array-like object of shape
`(n, 2)` that contains `n` samples to choose from, where each sample
is a 2-tuple of scalars and/or `None` values.
weights (list or tuple, optional): A list or tuple representing the distribution
over the sample space. If `None`, a uniform distribution will be assumed.
'''
if (not (weights is None)) and len(weights) != len(sample_space):
raise ValueError("`weights` must either be `None` for uniform distribution or have the same length as `sample_space`.")
self.sample_space = []
for bound_pair in sample_space:
if len(bound_pair) != 2:
raise ValueError("All elements of the sample space must be 2-tuples.")
bound_pair = list(bound_pair)
if bound_pair[0] is None: bound_pair[0] = 0.0
if bound_pair[1] is None: bound_pair[1] = 1.0
if bound_pair[0] > bound_pair[1]:
raise ValueError("For all sample space elements, the lower bound cannot be greater than the upper bound.")
self.sample_space.append(bound_pair)
self.sample_space_size = len(self.sample_space)
if weights is None:
self.weights = [1.0/self.sample_space_size] * self.sample_space_size
else:
self.weights = weights
def __call__(self):
'''
Returns:
An item of the sample space, i.e. a 2-tuple of scalars.
'''
i = np.random.choice(self.sample_space_size, p=self.weights)
return self.sample_space[i]
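

# --- Hedged example (an addition for illustration; the helper below is not part
# --- of the original API). `None` bounds default to 0.0 (lower) and 1.0 (upper),
# --- so each draw yields either [0.3, 1.0] or [0.0, 0.7] with equal probability.
def _demo_bound_generator():
    gen = BoundGenerator(sample_space=((0.3, None), (None, 0.7)),
                         weights=(0.5, 0.5))
    lower, upper = gen()
    return lower, upper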
class BoxFilter:
'''
Returns all bounding boxes that are valid with respect to the defined criteria.
'''
def __init__(self,
check_overlap=True,
check_min_area=True,
check_degenerate=True,
overlap_criterion='center_point',
overlap_bounds=(0.3, 1.0),
min_area=16,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4},
border_pixels='half'):
'''
Arguments:
check_overlap (bool, optional): Whether or not to enforce the overlap requirements defined by
`overlap_criterion` and `overlap_bounds`. Sometimes you might want to use the box filter only
to enforce a certain minimum area for all boxes (see next argument), in such cases you can
turn the overlap requirements off.
check_min_area (bool, optional): Whether or not to enforce the minimum area requirement defined
by `min_area`. If `True`, any boxes that have an area (in pixels) that is smaller than `min_area`
will be removed from the labels of an image. Bounding boxes below a certain area aren't useful
training examples. An object that takes up only, say, 5 pixels in an image is probably not
recognizable anymore, neither for a human, nor for an object detection model. It makes sense
to remove such boxes.
check_degenerate (bool, optional): Whether or not to check for and remove degenerate bounding boxes.
Degenerate bounding boxes are boxes that have `xmax <= xmin` and/or `ymax <= ymin`. In particular,
boxes with a width and/or height of zero are degenerate. It is obviously important to filter out
such boxes, so you should only set this option to `False` if you are certain that degenerate
boxes are not possible in your data and processing chain.
overlap_criterion (str, optional): Can be either of 'center_point', 'iou', or 'area'. Determines
which boxes are considered valid with respect to a given image. If set to 'center_point',
a given bounding box is considered valid if its center point lies within the image.
If set to 'area', a given bounding box is considered valid if the quotient of its intersection
area with the image and its own area is within the given `overlap_bounds`. If set to 'iou', a given
bounding box is considered valid if its IoU with the image is within the given `overlap_bounds`.
overlap_bounds (list or BoundGenerator, optional): Only relevant if `overlap_criterion` is 'area' or 'iou'.
Determines the lower and upper bounds for `overlap_criterion`. Can be either a 2-tuple of scalars
representing a lower bound and an upper bound, or a `BoundGenerator` object, which provides
the possibility to generate bounds randomly.
min_area (int, optional): Only relevant if `check_min_area` is `True`. Defines the minimum area in
pixels that a bounding box must have in order to be valid. Boxes with an area smaller than this
will be removed.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
'''
if not isinstance(overlap_bounds, (list, tuple, BoundGenerator)):
raise ValueError("`overlap_bounds` must be either a 2-tuple of scalars or a `BoundGenerator` object.")
if isinstance(overlap_bounds, (list, tuple)) and (overlap_bounds[0] > overlap_bounds[1]):
raise ValueError("The lower bound must not be greater than the upper bound.")
if not (overlap_criterion in {'iou', 'area', 'center_point'}):
raise ValueError("`overlap_criterion` must be one of 'iou', 'area', or 'center_point'.")
self.overlap_criterion = overlap_criterion
self.overlap_bounds = overlap_bounds
self.min_area = min_area
self.check_overlap = check_overlap
self.check_min_area = check_min_area
self.check_degenerate = check_degenerate
self.labels_format = labels_format
self.border_pixels = border_pixels
def __call__(self,
labels,
image_height=None,
image_width=None):
'''
Arguments:
labels (array): The labels to be filtered. This is an array with shape `(m,n)`, where
`m` is the number of bounding boxes and `n` is the number of elements that defines
each bounding box (box coordinates, class ID, etc.). The box coordinates are expected
to be in the image's coordinate system.
image_height (int): Only relevant if `check_overlap == True`. The height of the image
(in pixels) to compare the box coordinates to.
image_width (int): Only relevant if `check_overlap == True`. The width of the image (in pixels) to compare
the box coordinates to.
Returns:
An array containing the labels of all boxes that are valid.
'''
labels = np.copy(labels)
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Record the boxes that pass all checks here.
requirements_met = np.ones(shape=labels.shape[0], dtype=np.bool)
if self.check_degenerate:
non_degenerate = (labels[:,xmax] > labels[:,xmin]) * (labels[:,ymax] > labels[:,ymin])
requirements_met *= non_degenerate
if self.check_min_area:
min_area_met = (labels[:,xmax] - labels[:,xmin]) * (labels[:,ymax] - labels[:,ymin]) >= self.min_area
requirements_met *= min_area_met
if self.check_overlap:
# Get the lower and upper bounds.
if isinstance(self.overlap_bounds, BoundGenerator):
lower, upper = self.overlap_bounds()
else:
lower, upper = self.overlap_bounds
# Compute which boxes are valid.
if self.overlap_criterion == 'iou':
# Compute the patch coordinates.
image_coords = np.array([0, 0, image_width, image_height])
# Compute the IoU between the patch and all of the ground truth boxes.
image_boxes_iou = iou(image_coords, labels[:, [xmin, ymin, xmax, ymax]], coords='corners', mode='element-wise', border_pixels=self.border_pixels)
requirements_met *= (image_boxes_iou > lower) * (image_boxes_iou <= upper)
elif self.overlap_criterion == 'area':
if self.border_pixels == 'half':
d = 0
elif self.border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif self.border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
# Compute the areas of the boxes.
box_areas = (labels[:,xmax] - labels[:,xmin] + d) * (labels[:,ymax] - labels[:,ymin] + d)
# Compute the intersection area between the patch and all of the ground truth boxes.
clipped_boxes = np.copy(labels)
clipped_boxes[:,[ymin,ymax]] = np.clip(labels[:,[ymin,ymax]], a_min=0, a_max=image_height-1)
clipped_boxes[:,[xmin,xmax]] = np.clip(labels[:,[xmin,xmax]], a_min=0, a_max=image_width-1)
intersection_areas = (clipped_boxes[:,xmax] - clipped_boxes[:,xmin] + d) * (clipped_boxes[:,ymax] - clipped_boxes[:,ymin] + d) # `d` adjusts for whether the border pixels count towards the box areas.
# Check which boxes meet the overlap requirements.
if lower == 0.0:
mask_lower = intersection_areas > lower * box_areas # If `lower == 0`, we want to make sure that boxes with area 0 don't count, hence the ">" sign instead of the ">=" sign.
else:
mask_lower = intersection_areas >= lower * box_areas # Especially for the case `lower == 1` we want the ">=" sign, otherwise no boxes would count at all.
mask_upper = intersection_areas <= upper * box_areas
requirements_met *= mask_lower * mask_upper
elif self.overlap_criterion == 'center_point':
# Compute the center points of the boxes.
cy = (labels[:,ymin] + labels[:,ymax]) / 2
cx = (labels[:,xmin] + labels[:,xmax]) / 2
# Check which of the boxes have center points within the image and remove those that don't.
requirements_met *= (cy >= 0.0) * (cy <= image_height-1) * (cx >= 0.0) * (cx <= image_width-1)
return labels[requirements_met]
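

# --- Hedged example (an addition for illustration; the helper below is not part
# --- of the original API). Against a 100x100 image, the degenerate zero-width box
# --- and the box whose center point falls outside the image are both removed.
def _demo_box_filter():
    box_filter = BoxFilter(check_overlap=True,
                           check_min_area=True,
                           check_degenerate=True,
                           overlap_criterion='center_point',
                           min_area=16)
    labels = np.array([[1, 10, 10, 50, 50],     # kept
                       [1, 30, 30, 30, 80],     # degenerate: xmax == xmin
                       [1, 90, 90, 180, 180]])  # center (135, 135) lies off-image
    return box_filter(labels=labels, image_height=100, image_width=100)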
class ImageValidator:
'''
Returns `True` if a given minimum number of bounding boxes meets given overlap
requirements with an image of a given height and width.
'''
def __init__(self,
overlap_criterion='center_point',
bounds=(0.3, 1.0),
n_boxes_min=1,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4},
border_pixels='half'):
'''
Arguments:
overlap_criterion (str, optional): Can be either of 'center_point', 'iou', or 'area'. Determines
which boxes are considered valid with respect to a given image. If set to 'center_point',
a given bounding box is considered valid if its center point lies within the image.
If set to 'area', a given bounding box is considered valid if the quotient of its intersection
area with the image and its own area is within `lower` and `upper`. If set to 'iou', a given
bounding box is considered valid if its IoU with the image is within `lower` and `upper`.
bounds (list or BoundGenerator, optional): Only relevant if `overlap_criterion` is 'area' or 'iou'.
Determines the lower and upper bounds for `overlap_criterion`. Can be either a 2-tuple of scalars
representing a lower bound and an upper bound, or a `BoundGenerator` object, which provides
the possibility to generate bounds randomly.
n_boxes_min (int or str, optional): Either a positive integer or the string 'all'.
Determines the minimum number of boxes that must meet the `overlap_criterion` with respect to
an image of the given height and width in order for the image to be a valid image.
If set to 'all', an image is considered valid if all given boxes meet the `overlap_criterion`.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
'''
if not ((isinstance(n_boxes_min, int) and n_boxes_min > 0) or n_boxes_min == 'all'):
raise ValueError("`n_boxes_min` must be a positive integer or 'all'.")
self.overlap_criterion = overlap_criterion
self.bounds = bounds
self.n_boxes_min = n_boxes_min
self.labels_format = labels_format
self.border_pixels = border_pixels
self.box_filter = BoxFilter(check_overlap=True,
check_min_area=False,
check_degenerate=False,
overlap_criterion=self.overlap_criterion,
overlap_bounds=self.bounds,
labels_format=self.labels_format,
border_pixels=self.border_pixels)
def __call__(self,
labels,
image_height,
image_width):
'''
Arguments:
labels (array): The labels to be tested. The box coordinates are expected
to be in the image's coordinate system.
image_height (int): The height of the image to compare the box coordinates to.
image_width (int): The width of the image to compare the box coordinates to.
Returns:
A boolean indicating whether an image of the given height and width is
valid with respect to the given bounding boxes.
'''
self.box_filter.overlap_bounds = self.bounds
self.box_filter.labels_format = self.labels_format
# Get all boxes that meet the overlap requirements.
valid_labels = self.box_filter(labels=labels,
image_height=image_height,
image_width=image_width)
# Check whether enough boxes meet the requirements.
if isinstance(self.n_boxes_min, int):
# The image is valid if at least `self.n_boxes_min` ground truth boxes meet the requirements.
return len(valid_labels) >= self.n_boxes_min
elif self.n_boxes_min == 'all':
# The image is valid if all ground truth boxes meet the requirements.
return len(valid_labels) == len(labels)
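

# --- Hedged example (an addition for illustration; the helper below is not part
# --- of the original API). The 100x100 image is valid because at least
# --- `n_boxes_min=1` box has its center point inside the image.
def _demo_image_validator():
    validator = ImageValidator(overlap_criterion='center_point', n_boxes_min=1)
    labels = np.array([[1, 10, 10, 50, 50]])  # center point (30, 30) is inside
    return validator(labels=labels, image_height=100, image_width=100)  # -> True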
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/object_detection_2d_image_boxes_validation_utils.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/__init__.py |
|
'''
Miscellaneous data generator utilities.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
def apply_inverse_transforms(y_pred_decoded, inverse_transforms):
'''
Takes a list or Numpy array of decoded predictions and applies a given list of
transforms to them. The list of inverse transforms would usually contain the
inverter functions that some of the image transformations that come with this
data generator return. This function would normally be used to transform predictions
that were made on a transformed image back to the original image.
Arguments:
y_pred_decoded (list or array): Either a list of length `batch_size` that
contains Numpy arrays that contain the predictions for each batch item
or a Numpy array. If this is a list of Numpy arrays, the arrays would
usually have the shape `(num_predictions, 6)`, where `num_predictions`
is different for each batch item. If this is a Numpy array, it would
usually have the shape `(batch_size, num_predictions, 6)`. The last axis
would usually contain the class ID, confidence score, and four bounding
box coordinates for each prediction.
inverse_transforms (list): A nested list of length `batch_size` that contains
for each batch item a list of functions that take one argument (one element
of `y_pred_decoded` if it is a list or one slice along the first axis of
`y_pred_decoded` if it is an array) and return an output of the same shape
and data type.
Returns:
The transformed predictions, which have the same structure as `y_pred_decoded`.
'''
if isinstance(y_pred_decoded, list):
y_pred_decoded_inv = []
for i in range(len(y_pred_decoded)):
y_pred_decoded_inv.append(np.copy(y_pred_decoded[i]))
if y_pred_decoded_inv[i].size > 0: # If there are any predictions for this batch item.
for inverter in inverse_transforms[i]:
if not (inverter is None):
y_pred_decoded_inv[i] = inverter(y_pred_decoded_inv[i])
elif isinstance(y_pred_decoded, np.ndarray):
y_pred_decoded_inv = np.copy(y_pred_decoded)
for i in range(len(y_pred_decoded)):
if y_pred_decoded_inv[i].size > 0: # If there are any predictions for this batch item.
for inverter in inverse_transforms[i]:
if not (inverter is None):
y_pred_decoded_inv[i] = inverter(y_pred_decoded_inv[i])
else:
raise ValueError("`y_pred_decoded` must be either a list or a Numpy array.")
return y_pred_decoded_inv
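

# --- Hedged example (an addition for illustration; the helper below is not part
# --- of the original API). Predictions made on a horizontally flipped image are
# --- mapped back to the original image by mirroring the x coordinates.
def _demo_apply_inverse_transforms(img_width=300):
    def unflip_x(preds):
        preds = np.copy(preds)
        preds[:, [2, 4]] = img_width - preds[:, [4, 2]]  # mirror and swap xmin/xmax
        return preds
    # one batch item, one prediction: [class_id, confidence, xmin, ymin, xmax, ymax]
    y_pred_decoded = [np.array([[1., 0.9, 100., 20., 250., 120.]])]
    # result: xmin = 300 - 250 = 50, xmax = 300 - 100 = 200
    return apply_inverse_transforms(y_pred_decoded, [[unflip_x]])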
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/object_detection_2d_misc_utils.py |
'''
Various geometric image transformations for 2D object detection, both deterministic
and probabilistic.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import cv2
import random
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter, ImageValidator
class Resize:
'''
Resizes images to a specified height and width in pixels.
'''
def __init__(self,
height,
width,
interpolation_mode=cv2.INTER_LINEAR,
box_filter=None,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
height (int): The desired height of the output images in pixels.
width (int): The desired width of the output images in pixels.
interpolation_mode (int, optional): An integer that denotes a valid
OpenCV interpolation mode. For example, integers 0 through 5 are
valid interpolation modes.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if not (isinstance(box_filter, BoxFilter) or box_filter is None):
raise ValueError("`box_filter` must be either `None` or a `BoxFilter` object.")
self.out_height = height
self.out_width = width
self.interpolation_mode = interpolation_mode
self.box_filter = box_filter
self.labels_format = labels_format
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
image = cv2.resize(image,
dsize=(self.out_width, self.out_height),
interpolation=self.interpolation_mode)
if return_inverter:
def inverter(labels):
labels = np.copy(labels)
labels[:, [ymin+1, ymax+1]] = np.round(labels[:, [ymin+1, ymax+1]] * (img_height / self.out_height), decimals=0)
labels[:, [xmin+1, xmax+1]] = np.round(labels[:, [xmin+1, xmax+1]] * (img_width / self.out_width), decimals=0)
return labels
if labels is None:
if return_inverter:
return image, inverter
else:
return image
else:
labels = np.copy(labels)
labels[:, [ymin, ymax]] = np.round(labels[:, [ymin, ymax]] * (self.out_height / img_height), decimals=0)
labels[:, [xmin, xmax]] = np.round(labels[:, [xmin, xmax]] * (self.out_width / img_width), decimals=0)
if not (self.box_filter is None):
self.box_filter.labels_format = self.labels_format
labels = self.box_filter(labels=labels,
image_height=self.out_height,
image_width=self.out_width)
if return_inverter:
return image, labels, inverter
else:
return image, labels
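

# --- Hedged example (an addition for illustration; the helper below is not part
# --- of the original API). A 200x400 dummy image is resized to 300x300 and the
# --- single box is rescaled into the new coordinate system.
def _demo_resize():
    resize = Resize(height=300, width=300)
    image = np.zeros((200, 400, 3), dtype=np.uint8)
    labels = np.array([[1, 40, 20, 200, 100]])  # (class_id, xmin, ymin, xmax, ymax)
    image, labels = resize(image, labels)
    # labels == [[1, 30, 30, 150, 150]]: x scaled by 300/400, y by 300/200
    return image, labels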
class ResizeRandomInterp:
'''
Resizes images to a specified height and width in pixels using a randomly
selected interpolation mode.
'''
def __init__(self,
height,
width,
interpolation_modes=[cv2.INTER_NEAREST,
cv2.INTER_LINEAR,
cv2.INTER_CUBIC,
cv2.INTER_AREA,
cv2.INTER_LANCZOS4],
box_filter=None,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
height (int): The desired height of the output image in pixels.
width (int): The desired width of the output image in pixels.
interpolation_modes (list/tuple, optional): A list/tuple of integers
that represent valid OpenCV interpolation modes. For example,
integers 0 through 5 are valid interpolation modes.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if not (isinstance(interpolation_modes, (list, tuple))):
raise ValueError("`interpolation_modes` must be a list or tuple.")
self.height = height
self.width = width
self.interpolation_modes = interpolation_modes
self.box_filter = box_filter
self.labels_format = labels_format
self.resize = Resize(height=self.height,
width=self.width,
box_filter=self.box_filter,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
self.resize.interpolation_mode = np.random.choice(self.interpolation_modes)
self.resize.labels_format = self.labels_format
return self.resize(image, labels, return_inverter)
class Flip:
'''
Flips images horizontally or vertically.
'''
def __init__(self,
dim='horizontal',
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
dim (str, optional): Can be either of 'horizontal' and 'vertical'.
If 'horizontal', images will be flipped horizontally, i.e. along
the vertical axis. If 'vertical', images will be flipped vertically,
i.e. along the horizontal axis.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if not (dim in {'horizontal', 'vertical'}): raise ValueError("`dim` can be one of 'horizontal' and 'vertical'.")
self.dim = dim
self.labels_format = labels_format
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
if self.dim == 'horizontal':
image = image[:,::-1]
if labels is None:
return image
else:
labels = np.copy(labels)
labels[:, [xmin, xmax]] = img_width - labels[:, [xmax, xmin]]
return image, labels
else:
image = image[::-1]
if labels is None:
return image
else:
labels = np.copy(labels)
labels[:, [ymin, ymax]] = img_height - labels[:, [ymax, ymin]]
return image, labels
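

# --- Hedged example (an addition for illustration; the helper below is not part
# --- of the original API). Flipping a 100-pixel-wide image horizontally mirrors
# --- each box about the vertical axis: new xmin = W - old xmax, new xmax = W - old xmin.
def _demo_flip():
    flip = Flip(dim='horizontal')
    image = np.zeros((50, 100, 3), dtype=np.uint8)
    labels = np.array([[1, 10, 5, 40, 30]])  # (class_id, xmin, ymin, xmax, ymax)
    image, labels = flip(image, labels)
    # labels == [[1, 60, 5, 90, 30]]
    return image, labels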
class RandomFlip:
'''
Randomly flips images horizontally or vertically. The randomness only refers
to whether or not the image will be flipped.
'''
def __init__(self,
dim='horizontal',
prob=0.5,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
dim (str, optional): Can be either of 'horizontal' and 'vertical'.
If 'horizontal', images will be flipped horizontally, i.e. along
the vertical axis. If 'vertical', images will be flipped vertically,
i.e. along the horizontal axis.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
self.dim = dim
self.prob = prob
self.labels_format = labels_format
self.flip = Flip(dim=self.dim, labels_format=self.labels_format)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
self.flip.labels_format = self.labels_format
return self.flip(image, labels)
elif labels is None:
return image
else:
return image, labels
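# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_random_flip():
    '''Minimal usage sketch for `RandomFlip`: the flip fires with probability
    `prob`; either way the image shape is unchanged.'''
    rf = RandomFlip(dim='horizontal', prob=0.5)
    image = np.zeros((4, 6, 3), dtype=np.uint8)
    flipped = rf(image)
    assert flipped.shape == image.shape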
class Translate:
'''
Translates images horizontally and/or vertically.
'''
def __init__(self,
dy,
dx,
clip_boxes=True,
box_filter=None,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
dy (float): The fraction of the image height by which to translate images along the
vertical axis. Positive values translate images downwards, negative values
translate images upwards.
dx (float): The fraction of the image width by which to translate images along the
horizontal axis. Positive values translate images to the right, negative values
translate images to the left.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
image after the translation.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the
background pixels of the translated images.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if not (isinstance(box_filter, BoxFilter) or box_filter is None):
raise ValueError("`box_filter` must be either `None` or a `BoxFilter` object.")
self.dy_rel = dy
self.dx_rel = dx
self.clip_boxes = clip_boxes
self.box_filter = box_filter
self.background = background
self.labels_format = labels_format
def __call__(self, image, labels=None):
img_height, img_width = image.shape[:2]
# Compute the translation matrix.
dy_abs = int(round(img_height * self.dy_rel))
dx_abs = int(round(img_width * self.dx_rel))
M = np.float32([[1, 0, dx_abs],
[0, 1, dy_abs]])
# Translate the image.
image = cv2.warpAffine(image,
M=M,
dsize=(img_width, img_height),
borderMode=cv2.BORDER_CONSTANT,
borderValue=self.background)
if labels is None:
return image
else:
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
labels = np.copy(labels)
# Translate the box coordinates to the translated image's coordinate system.
labels[:,[xmin,xmax]] += dx_abs
labels[:,[ymin,ymax]] += dy_abs
# Compute all valid boxes for this patch.
if not (self.box_filter is None):
self.box_filter.labels_format = self.labels_format
labels = self.box_filter(labels=labels,
image_height=img_height,
image_width=img_width)
if self.clip_boxes:
labels[:,[ymin,ymax]] = np.clip(labels[:,[ymin,ymax]], a_min=0, a_max=img_height-1)
labels[:,[xmin,xmax]] = np.clip(labels[:,[xmin,xmax]], a_min=0, a_max=img_width-1)
return image, labels
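# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_translate():
    '''Minimal usage sketch for `Translate`: shift a dummy image 20% right and
    10% down, and check the box shift.'''
    translate = Translate(dy=0.1, dx=0.2)
    image = np.zeros((100, 200, 3), dtype=np.uint8)
    labels = np.array([[1, 10, 20, 50, 60]])  # class_id, xmin, ymin, xmax, ymax
    _, shifted = translate(image, labels)
    # dx_abs = round(200 * 0.2) = 40 and dy_abs = round(100 * 0.1) = 10.
    assert shifted[0, 1] == 50 and shifted[0, 2] == 30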
class RandomTranslate:
'''
Randomly translates images horizontally and/or vertically.
'''
def __init__(self,
dy_minmax=(0.03,0.3),
dx_minmax=(0.03,0.3),
prob=0.5,
clip_boxes=True,
box_filter=None,
image_validator=None,
n_trials_max=3,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
dy_minmax (list/tuple, optional): A 2-tuple `(min, max)` of non-negative floats that
determines the minimum and maximum relative translation of images along the vertical
axis both upward and downward. That is, images will be randomly translated by at least
`min` and at most `max` either upward or downward. For example, if `dy_minmax == (0.05,0.3)`,
an image of size `(100,100)` will be translated by at least 5 and at most 30 pixels
either upward or downward. The translation direction is chosen randomly.
dx_minmax (list/tuple, optional): A 2-tuple `(min, max)` of non-negative floats that
determines the minimum and maximum relative translation of images along the horizontal
axis both to the left and right. That is, images will be randomly translated by at least
`min` and at most `max` either left or right. For example, if `dx_minmax == (0.05,0.3)`,
an image of size `(100,100)` will be translated by at least 5 and at most 30 pixels
either left or right. The translation direction is chosen randomly.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
image after the translation.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
image_validator (ImageValidator, optional): Only relevant if ground truth bounding boxes are given.
An `ImageValidator` object to determine whether a translated image is valid. If `None`,
any outcome is valid.
n_trials_max (int, optional): Only relevant if ground truth bounding boxes are given.
                Determines the maximal number of trials to produce a valid image. If no valid image could
be produced in `n_trials_max` trials, returns the unaltered input image.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the
background pixels of the translated images.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if dy_minmax[0] > dy_minmax[1]:
raise ValueError("It must be `dy_minmax[0] <= dy_minmax[1]`.")
if dx_minmax[0] > dx_minmax[1]:
raise ValueError("It must be `dx_minmax[0] <= dx_minmax[1]`.")
if dy_minmax[0] < 0 or dx_minmax[0] < 0:
raise ValueError("It must be `dy_minmax[0] >= 0` and `dx_minmax[0] >= 0`.")
if not (isinstance(image_validator, ImageValidator) or image_validator is None):
raise ValueError("`image_validator` must be either `None` or an `ImageValidator` object.")
self.dy_minmax = dy_minmax
self.dx_minmax = dx_minmax
self.prob = prob
self.clip_boxes = clip_boxes
self.box_filter = box_filter
self.image_validator = image_validator
self.n_trials_max = n_trials_max
self.background = background
self.labels_format = labels_format
self.translate = Translate(dy=0,
dx=0,
clip_boxes=self.clip_boxes,
box_filter=self.box_filter,
background=self.background,
labels_format=self.labels_format)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
img_height, img_width = image.shape[:2]
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Override the preset labels format.
if not self.image_validator is None:
self.image_validator.labels_format = self.labels_format
self.translate.labels_format = self.labels_format
for _ in range(max(1, self.n_trials_max)):
# Pick the relative amount by which to translate.
dy_abs = np.random.uniform(self.dy_minmax[0], self.dy_minmax[1])
dx_abs = np.random.uniform(self.dx_minmax[0], self.dx_minmax[1])
# Pick the direction in which to translate.
dy = np.random.choice([-dy_abs, dy_abs])
dx = np.random.choice([-dx_abs, dx_abs])
self.translate.dy_rel = dy
self.translate.dx_rel = dx
if (labels is None) or (self.image_validator is None):
# We either don't have any boxes or if we do, we will accept any outcome as valid.
return self.translate(image, labels)
else:
# Translate the box coordinates to the translated image's coordinate system.
new_labels = np.copy(labels)
new_labels[:, [ymin, ymax]] += int(round(img_height * dy))
new_labels[:, [xmin, xmax]] += int(round(img_width * dx))
# Check if the patch is valid.
if self.image_validator(labels=new_labels,
image_height=img_height,
image_width=img_width):
return self.translate(image, labels)
# If all attempts failed, return the unaltered input image.
if labels is None:
return image
else:
return image, labels
elif labels is None:
return image
else:
return image, labels
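# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_random_translate():
    '''Minimal usage sketch for `RandomTranslate` with `prob=1.0`, i.e. the
    translation always fires; the canvas size is unchanged.'''
    rt = RandomTranslate(dy_minmax=(0.1, 0.1), dx_minmax=(0.1, 0.1), prob=1.0)
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    translated = rt(image)
    assert translated.shape == image.shape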
class Scale:
'''
Scales images, i.e. zooms in or out.
'''
def __init__(self,
factor,
clip_boxes=True,
box_filter=None,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
factor (float): The fraction of the image size by which to scale images. Must be positive.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
image after the translation.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the potential
background pixels of the scaled images.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if factor <= 0:
raise ValueError("It must be `factor > 0`.")
if not (isinstance(box_filter, BoxFilter) or box_filter is None):
raise ValueError("`box_filter` must be either `None` or a `BoxFilter` object.")
self.factor = factor
self.clip_boxes = clip_boxes
self.box_filter = box_filter
self.background = background
self.labels_format = labels_format
def __call__(self, image, labels=None):
img_height, img_width = image.shape[:2]
# Compute the rotation matrix.
M = cv2.getRotationMatrix2D(center=(img_width / 2, img_height / 2),
angle=0,
scale=self.factor)
# Scale the image.
image = cv2.warpAffine(image,
M=M,
dsize=(img_width, img_height),
borderMode=cv2.BORDER_CONSTANT,
borderValue=self.background)
if labels is None:
return image
else:
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
labels = np.copy(labels)
# Scale the bounding boxes accordingly.
# Transform two opposite corner points of the rectangular boxes using the rotation matrix `M`.
toplefts = np.array([labels[:,xmin], labels[:,ymin], np.ones(labels.shape[0])])
bottomrights = np.array([labels[:,xmax], labels[:,ymax], np.ones(labels.shape[0])])
new_toplefts = (np.dot(M, toplefts)).T
new_bottomrights = (np.dot(M, bottomrights)).T
            labels[:,[xmin,ymin]] = np.round(new_toplefts, decimals=0).astype(int)
            labels[:,[xmax,ymax]] = np.round(new_bottomrights, decimals=0).astype(int)
# Compute all valid boxes for this patch.
if not (self.box_filter is None):
self.box_filter.labels_format = self.labels_format
labels = self.box_filter(labels=labels,
image_height=img_height,
image_width=img_width)
if self.clip_boxes:
labels[:,[ymin,ymax]] = np.clip(labels[:,[ymin,ymax]], a_min=0, a_max=img_height-1)
labels[:,[xmin,xmax]] = np.clip(labels[:,[xmin,xmax]], a_min=0, a_max=img_width-1)
return image, labels
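# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_scale():
    '''Minimal usage sketch for `Scale`: a 2x zoom about the image center and
    the corresponding box transform.'''
    scale = Scale(factor=2.0)
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    labels = np.array([[1, 40, 40, 60, 60]])  # class_id, xmin, ymin, xmax, ymax
    _, scaled = scale(image, labels)
    # Under x' = 2x - 50 (zoom about (50, 50)), corners (40, 40)/(60, 60)
    # map to (30, 30)/(70, 70).
    assert scaled[0, 1] == 30 and scaled[0, 3] == 70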
class RandomScale:
'''
Randomly scales images.
'''
def __init__(self,
min_factor=0.5,
max_factor=1.5,
prob=0.5,
clip_boxes=True,
box_filter=None,
image_validator=None,
n_trials_max=3,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
min_factor (float, optional): The minimum fraction of the image size by which to scale images.
Must be positive.
max_factor (float, optional): The maximum fraction of the image size by which to scale images.
Must be positive.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
image after the translation.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
image_validator (ImageValidator, optional): Only relevant if ground truth bounding boxes are given.
An `ImageValidator` object to determine whether a scaled image is valid. If `None`,
any outcome is valid.
n_trials_max (int, optional): Only relevant if ground truth bounding boxes are given.
                Determines the maximal number of trials to produce a valid image. If no valid image could
be produced in `n_trials_max` trials, returns the unaltered input image.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the potential
background pixels of the scaled images.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if not (0 < min_factor <= max_factor):
raise ValueError("It must be `0 < min_factor <= max_factor`.")
if not (isinstance(image_validator, ImageValidator) or image_validator is None):
raise ValueError("`image_validator` must be either `None` or an `ImageValidator` object.")
self.min_factor = min_factor
self.max_factor = max_factor
self.prob = prob
self.clip_boxes = clip_boxes
self.box_filter = box_filter
self.image_validator = image_validator
self.n_trials_max = n_trials_max
self.background = background
self.labels_format = labels_format
self.scale = Scale(factor=1.0,
clip_boxes=self.clip_boxes,
box_filter=self.box_filter,
background=self.background,
labels_format=self.labels_format)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
img_height, img_width = image.shape[:2]
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Override the preset labels format.
if not self.image_validator is None:
self.image_validator.labels_format = self.labels_format
self.scale.labels_format = self.labels_format
for _ in range(max(1, self.n_trials_max)):
# Pick a scaling factor.
factor = np.random.uniform(self.min_factor, self.max_factor)
self.scale.factor = factor
if (labels is None) or (self.image_validator is None):
# We either don't have any boxes or if we do, we will accept any outcome as valid.
return self.scale(image, labels)
else:
# Scale the bounding boxes accordingly.
# Transform two opposite corner points of the rectangular boxes using the rotation matrix `M`.
toplefts = np.array([labels[:,xmin], labels[:,ymin], np.ones(labels.shape[0])])
bottomrights = np.array([labels[:,xmax], labels[:,ymax], np.ones(labels.shape[0])])
# Compute the rotation matrix.
M = cv2.getRotationMatrix2D(center=(img_width / 2, img_height / 2),
angle=0,
scale=factor)
new_toplefts = (np.dot(M, toplefts)).T
new_bottomrights = (np.dot(M, bottomrights)).T
new_labels = np.copy(labels)
                    new_labels[:,[xmin,ymin]] = np.around(new_toplefts, decimals=0).astype(int)
                    new_labels[:,[xmax,ymax]] = np.around(new_bottomrights, decimals=0).astype(int)
# Check if the patch is valid.
if self.image_validator(labels=new_labels,
image_height=img_height,
image_width=img_width):
return self.scale(image, labels)
# If all attempts failed, return the unaltered input image.
if labels is None:
return image
else:
return image, labels
elif labels is None:
return image
else:
return image, labels
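# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_random_scale():
    '''Minimal usage sketch for `RandomScale` with `prob=1.0`: the content is
    zoomed in or out, but the canvas size is unchanged.'''
    rs = RandomScale(min_factor=0.8, max_factor=1.2, prob=1.0)
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    scaled = rs(image)
    assert scaled.shape == image.shape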
class Rotate:
'''
Rotates images counter-clockwise by 90, 180, or 270 degrees.
'''
def __init__(self,
angle,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
angle (int): The angle in degrees by which to rotate the images counter-clockwise.
Only 90, 180, and 270 are valid values.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
        if angle not in {90, 180, 270}:
            raise ValueError("`angle` must be in the set {90, 180, 270}.")
self.angle = angle
self.labels_format = labels_format
def __call__(self, image, labels=None):
img_height, img_width = image.shape[:2]
# Compute the rotation matrix.
M = cv2.getRotationMatrix2D(center=(img_width / 2, img_height / 2),
angle=self.angle,
scale=1)
# Get the sine and cosine from the rotation matrix.
cos_angle = np.abs(M[0, 0])
sin_angle = np.abs(M[0, 1])
# Compute the new bounding dimensions of the image.
img_width_new = int(img_height * sin_angle + img_width * cos_angle)
img_height_new = int(img_height * cos_angle + img_width * sin_angle)
# Adjust the rotation matrix to take into account the translation.
M[1, 2] += (img_height_new - img_height) / 2
M[0, 2] += (img_width_new - img_width) / 2
# Rotate the image.
image = cv2.warpAffine(image,
M=M,
dsize=(img_width_new, img_height_new))
if labels is None:
return image
else:
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
labels = np.copy(labels)
# Rotate the bounding boxes accordingly.
# Transform two opposite corner points of the rectangular boxes using the rotation matrix `M`.
toplefts = np.array([labels[:,xmin], labels[:,ymin], np.ones(labels.shape[0])])
bottomrights = np.array([labels[:,xmax], labels[:,ymax], np.ones(labels.shape[0])])
new_toplefts = (np.dot(M, toplefts)).T
new_bottomrights = (np.dot(M, bottomrights)).T
            labels[:,[xmin,ymin]] = np.round(new_toplefts, decimals=0).astype(int)
            labels[:,[xmax,ymax]] = np.round(new_bottomrights, decimals=0).astype(int)
if self.angle == 90:
# ymin and ymax were switched by the rotation.
labels[:,[ymax,ymin]] = labels[:,[ymin,ymax]]
elif self.angle == 180:
# ymin and ymax were switched by the rotation,
# and also xmin and xmax were switched.
labels[:,[ymax,ymin]] = labels[:,[ymin,ymax]]
labels[:,[xmax,xmin]] = labels[:,[xmin,xmax]]
elif self.angle == 270:
# xmin and xmax were switched by the rotation.
labels[:,[xmax,xmin]] = labels[:,[xmin,xmax]]
return image, labels
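# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_rotate():
    '''Minimal usage sketch for `Rotate`: a 90-degree counter-clockwise turn
    swaps the spatial dimensions of the output canvas.'''
    rotate = Rotate(angle=90)
    image = np.zeros((100, 200, 3), dtype=np.uint8)
    rotated = rotate(image)
    assert rotated.shape[:2] == (200, 100)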
class RandomRotate:
'''
Randomly rotates images counter-clockwise.
'''
def __init__(self,
angles=[90, 180, 270],
prob=0.5,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
            angles (list): The list of angles in degrees from which one is randomly selected to rotate
the images counter-clockwise. Only 90, 180, and 270 are valid values.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
for angle in angles:
            if angle not in {90, 180, 270}:
raise ValueError("`angles` can only contain the values 90, 180, and 270.")
self.angles = angles
self.prob = prob
self.labels_format = labels_format
self.rotate = Rotate(angle=90, labels_format=self.labels_format)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
# Pick a rotation angle.
self.rotate.angle = random.choice(self.angles)
self.rotate.labels_format = self.labels_format
return self.rotate(image, labels)
elif labels is None:
return image
else:
return image, labels
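# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_random_rotate():
    '''Minimal usage sketch for `RandomRotate` with `prob=1.0`: 90/270 swap the
    spatial dimensions, 180 preserves them.'''
    rr = RandomRotate(angles=[90, 180, 270], prob=1.0)
    image = np.zeros((80, 120, 3), dtype=np.uint8)
    rotated = rr(image)
    assert rotated.shape[:2] in [(120, 80), (80, 120)]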
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/object_detection_2d_geometric_ops.py |
'''
The data augmentation operations of the original SSD implementation.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import cv2
import inspect
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_photometric_ops import ConvertColor, ConvertDataType, ConvertTo3Channels, RandomBrightness, RandomContrast, RandomHue, RandomSaturation, RandomChannelSwap
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_patch_sampling_ops import PatchCoordinateGenerator, RandomPatch, RandomPatchInf
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_geometric_ops import ResizeRandomInterp, RandomFlip
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_image_boxes_validation_utils import BoundGenerator, BoxFilter, ImageValidator
class SSDRandomCrop:
'''
Performs the same random crops as defined by the `batch_sampler` instructions
of the original Caffe implementation of SSD. A description of this random cropping
strategy can also be found in the data augmentation section of the paper:
https://arxiv.org/abs/1512.02325
'''
def __init__(self,
min_scale=0.3,
max_scale=1.0,
min_ar=0.5,
max_ar=2.0,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
        Arguments:
            min_scale (float, optional): The minimum size of a sampled patch as a fraction
                of the image size in each spatial dimension.
            max_scale (float, optional): The maximum size of a sampled patch as a fraction
                of the image size in each spatial dimension.
            min_ar (float, optional): The minimum aspect ratio of the sampled patches.
            max_ar (float, optional): The maximum aspect ratio of the sampled patches.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
self.labels_format = labels_format
# This randomly samples one of the lower IoU bounds defined
# by the `sample_space` every time it is called.
self.bound_generator = BoundGenerator(sample_space=((None, None),
(0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None)),
weights=None)
# Produces coordinates for candidate patches such that the height
# and width of the patches are between 0.3 and 1.0 of the height
# and width of the respective image and the aspect ratio of the
# patches is between 0.5 and 2.0.
self.patch_coord_generator = PatchCoordinateGenerator(must_match='h_w',
min_scale=min_scale,
max_scale=max_scale,
scale_uniformly=False,
min_aspect_ratio=min_ar,
max_aspect_ratio=max_ar)
# Filters out boxes whose center point does not lie within the
# chosen patches.
self.box_filter = BoxFilter(check_overlap=True,
check_min_area=False,
check_degenerate=False,
overlap_criterion='center_point',
labels_format=self.labels_format)
# Determines whether a given patch is considered a valid patch.
# Defines a patch to be valid if at least one ground truth bounding box
# (n_boxes_min == 1) has an IoU overlap with the patch that
# meets the requirements defined by `bound_generator`.
self.image_validator = ImageValidator(overlap_criterion='iou',
n_boxes_min=1,
labels_format=self.labels_format,
border_pixels='half')
# Performs crops according to the parameters set in the objects above.
# Runs until either a valid patch is found or the original input image
# is returned unaltered. Runs a maximum of 50 trials to find a valid
# patch for each new sampled IoU threshold. Every 50 trials, the original
# image is returned as is with probability (1 - prob) = 0.143.
self.random_crop = RandomPatchInf(patch_coord_generator=self.patch_coord_generator,
box_filter=self.box_filter,
image_validator=self.image_validator,
bound_generator=self.bound_generator,
n_trials_max=50,
clip_boxes=True,
prob=0.857,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
self.random_crop.labels_format = self.labels_format
return self.random_crop(image, labels, return_inverter)
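# Illustrative sketch (editor's addition, hypothetical helper name; assumes
# `RandomPatchInf` returns `(image, labels)` when labels are given).
def _demo_ssd_random_crop():
    '''Minimal usage sketch for `SSDRandomCrop` on a dummy sample.'''
    cropper = SSDRandomCrop()
    image = np.random.randint(0, 256, size=(240, 320, 3), dtype=np.uint8)
    labels = np.array([[1, 80, 60, 240, 180]])  # class_id, xmin, ymin, xmax, ymax
    cropped_image, cropped_labels = cropper(image, labels)
    assert cropped_image.ndim == 3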
class SSDExpand:
'''
Performs the random image expansion as defined by the `train_transform_param` instructions
of the original Caffe implementation of SSD. A description of this expansion strategy
can also be found in section 3.6 ("Data Augmentation for Small Object Accuracy") of the paper:
https://arxiv.org/abs/1512.02325
'''
def __init__(self,
min_scale=1.0,
max_scale=4.0,
background=(123, 117, 104),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
        Arguments:
            min_scale (float, optional): The minimum factor by which the expansion canvas
                can exceed the input image size in each spatial dimension.
            max_scale (float, optional): The maximum factor by which the expansion canvas
                can exceed the input image size in each spatial dimension.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the
background pixels of the translated images.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
self.labels_format = labels_format
# Generate coordinates for patches that are between 1.0 and 4.0 times
# the size of the input image in both spatial dimensions.
self.patch_coord_generator = PatchCoordinateGenerator(must_match='h_w',
min_scale=min_scale,
max_scale=max_scale,
scale_uniformly=True)
# With probability 0.5, place the input image randomly on a canvas filled with
# mean color values according to the parameters set above. With probability 0.5,
# return the input image unaltered.
self.expand = RandomPatch(patch_coord_generator=self.patch_coord_generator,
box_filter=None,
image_validator=None,
n_trials_max=1,
clip_boxes=False,
prob=0.5,
background=background,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
self.expand.labels_format = self.labels_format
return self.expand(image, labels, return_inverter)
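# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_ssd_expand():
    '''Minimal usage sketch for `SSDExpand`: with probability 0.5 the image is
    placed on a 1x-4x mean-color canvas; the box count is preserved either way.'''
    expander = SSDExpand()
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    labels = np.array([[1, 10, 10, 90, 90]])  # class_id, xmin, ymin, xmax, ymax
    out_image, out_labels = expander(image, labels)
    assert out_labels.shape == labels.shape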
class SSDPhotometricDistortions:
'''
Performs the photometric distortions defined by the `train_transform_param` instructions
of the original Caffe implementation of SSD.
'''
def __init__(self, b=32, c=0.5, s=0.5, h=18):
self.convert_RGB_to_HSV = ConvertColor(current='RGB', to='HSV')
self.convert_HSV_to_RGB = ConvertColor(current='HSV', to='RGB')
self.convert_to_float32 = ConvertDataType(to='float32')
self.convert_to_uint8 = ConvertDataType(to='uint8')
self.convert_to_3_channels = ConvertTo3Channels()
self.random_brightness = RandomBrightness(lower=-b, upper=b, prob=0.5)
self.random_contrast = RandomContrast(lower=1-c, upper=1+c, prob=0.5)
self.random_saturation = RandomSaturation(lower=1-s, upper=1+s, prob=0.5)
self.random_hue = RandomHue(max_delta=h, prob=0.5)
# self.random_brightness = RandomBrightness(lower=-32, upper=32, prob=0)
# self.random_contrast = RandomContrast(lower=0.5, upper=1.5, prob=0)
# self.random_saturation = RandomSaturation(lower=0.5, upper=1.5, prob=0)
# self.random_hue = RandomHue(max_delta=18, prob=0)
self.random_channel_swap = RandomChannelSwap(prob=0.0)
self.sequence1 = [self.convert_to_3_channels,
self.convert_to_float32,
self.random_brightness,
self.random_contrast,
self.convert_to_uint8,
self.convert_RGB_to_HSV,
self.convert_to_float32,
self.random_saturation,
self.random_hue,
self.convert_to_uint8,
self.convert_HSV_to_RGB,
self.random_channel_swap]
self.sequence2 = [self.convert_to_3_channels,
self.convert_to_float32,
self.random_brightness,
self.convert_to_uint8,
self.convert_RGB_to_HSV,
self.convert_to_float32,
self.random_saturation,
self.random_hue,
self.convert_to_uint8,
self.convert_HSV_to_RGB,
self.convert_to_float32,
self.random_contrast,
self.convert_to_uint8,
self.random_channel_swap]
def __call__(self, image, labels):
# Choose sequence 1 with probability 0.5.
if np.random.choice(2):
for transform in self.sequence1:
image, labels = transform(image, labels)
return image, labels
# Choose sequence 2 with probability 0.5.
else:
for transform in self.sequence2:
image, labels = transform(image, labels)
return image, labels
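# Illustrative sketch (editor's addition, hypothetical helper name; assumes the
# imported photometric ops leave the label array untouched).
def _demo_ssd_photometric_distortions():
    '''Minimal usage sketch for `SSDPhotometricDistortions` on a dummy RGB image.'''
    distort = SSDPhotometricDistortions()
    image = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
    labels = np.array([[1, 10, 10, 90, 90]])  # class_id, xmin, ymin, xmax, ymax
    out_image, out_labels = distort(image, labels)
    # Photometric distortions change pixel values only, never the geometry.
    assert np.array_equal(out_labels, labels)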
class SSDDataAugmentation:
'''
Reproduces the data augmentation pipeline used in the training of the original
Caffe implementation of SSD.
'''
def __init__(self,
img_height=300,
img_width=300,
rc_min=0.3,
rc_max=1.0,
rc_min_ar=0.5,
rc_max_ar=2.0,
zo_min=1.0,
zo_max=4.0,
b_delta=32,
c_delta=0.5,
s_delta=0.5,
h_delta=18,
flip_prob=0.5,
background=(123, 117, 104),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
            img_height (int): The desired height of the output images in pixels.
            img_width (int): The desired width of the output images in pixels.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the
background pixels of the translated images.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
self.labels_format = labels_format
self.photometric_distortions = SSDPhotometricDistortions(b=b_delta, c=c_delta, s=s_delta, h=h_delta)
self.expand = SSDExpand(background=background,
labels_format=self.labels_format,
min_scale=zo_min,
max_scale=zo_max)
self.random_crop = SSDRandomCrop(labels_format=self.labels_format,
min_scale=rc_min,
max_scale=rc_max,
min_ar=rc_min_ar,
max_ar=rc_max_ar)
self.random_flip = RandomFlip(dim='horizontal', prob=flip_prob, labels_format=self.labels_format)
# This box filter makes sure that the resized images don't contain any degenerate boxes.
        # Resizing the images could lead the boxes to become smaller. For boxes that are already
# pretty small, that might result in boxes with height and/or width zero, which we obviously
# cannot allow.
self.box_filter = BoxFilter(check_overlap=False,
check_min_area=False,
check_degenerate=True,
labels_format=self.labels_format)
self.resize = ResizeRandomInterp(height=img_height,
width=img_width,
interpolation_modes=[cv2.INTER_NEAREST,
cv2.INTER_LINEAR,
cv2.INTER_CUBIC,
cv2.INTER_AREA,
cv2.INTER_LANCZOS4],
box_filter=self.box_filter,
labels_format=self.labels_format)
self.sequence = []
assert b_delta >= 0, "Brightness delta value should be >=0."
assert 1 > c_delta >= 0, "Contrast delta value should be [0, 1)."
assert 1 > s_delta >= 0, "Saturation delta value should be [0, 1)."
assert h_delta >= 0, "Hue delta value should be >=0."
if not (b_delta == c_delta == s_delta == h_delta == 0):
self.sequence.append(self.photometric_distortions)
assert zo_max >= zo_min >= 1.0, "Zoom out range should be [1, x] where x >=1."
        # Skip the expansion op when the zoom-out range collapses to [1.0, 1.0], i.e. a no-op.
        if not np.allclose([zo_max, 1.0], [1.0, zo_min], atol=1e-5):
            self.sequence.append(self.expand)
assert 1 >= rc_max >= rc_min > 0, "Random crop should be (0, 1]."
        # Skip the random crop op when the crop range collapses to [1.0, 1.0], i.e. a no-op.
        if not np.allclose([rc_max, 1.0], [1.0, rc_min], atol=1e-5):
            self.sequence.append(self.random_crop)
assert 1 > flip_prob >= 0, "Random flip probability should be [0, 1)"
if flip_prob > 0:
self.sequence.append(self.random_flip)
self.sequence.append(self.resize)
def __call__(self, image, labels, return_inverter=False):
self.expand.labels_format = self.labels_format
self.random_crop.labels_format = self.labels_format
self.random_flip.labels_format = self.labels_format
self.resize.labels_format = self.labels_format
inverters = []
sequences = self.sequence
for transform in sequences:
if return_inverter and ('return_inverter' in inspect.signature(transform).parameters):
image, labels, inverter = transform(image, labels, return_inverter=True)
inverters.append(inverter)
else:
image, labels = transform(image, labels)
if return_inverter:
return image, labels, inverters[::-1]
else:
# return image, labels
# @TODO(tylerz): for py_function
return image.astype(np.float32), labels.astype(np.float32)
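# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_ssd_data_augmentation():
    '''Minimal usage sketch for the full `SSDDataAugmentation` chain on a dummy
    sample; the output image is always resized to the target resolution.'''
    augmenter = SSDDataAugmentation(img_height=300, img_width=300)
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    labels = np.array([[1, 100, 100, 300, 300]])  # class_id, xmin, ymin, xmax, ymax
    aug_image, aug_labels = augmenter(image, labels)
    assert aug_image.shape[:2] == (300, 300) and aug_image.dtype == np.float32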
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/data_augmentation_chain_original_ssd.py |
'''
Various patch sampling operations for data augmentation in 2D object detection.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
from nvidia_tao_tf1.cv.ssd.builders.data_generator.object_detection_2d_image_boxes_validation_utils import BoundGenerator, BoxFilter, ImageValidator
class PatchCoordinateGenerator:
'''
Generates random patch coordinates that meet specified requirements.
'''
def __init__(self,
img_height=None,
img_width=None,
must_match='h_w',
min_scale=0.3,
max_scale=1.0,
scale_uniformly=False,
                 min_aspect_ratio=0.5,
                 max_aspect_ratio=2.0,
patch_ymin=None,
patch_xmin=None,
patch_height=None,
patch_width=None,
patch_aspect_ratio=None):
'''
Arguments:
img_height (int): The height of the image for which the patch coordinates
shall be generated. Doesn't have to be known upon construction.
img_width (int): The width of the image for which the patch coordinates
shall be generated. Doesn't have to be known upon construction.
            must_match (str, optional): Can be one of 'h_w', 'h_ar', or 'w_ar'.
Specifies which two of the three quantities height, width, and aspect
ratio determine the shape of the generated patch. The respective third
quantity will be computed from the other two. For example,
if `must_match == 'h_w'`, then the patch's height and width will be
set to lie within [min_scale, max_scale] of the image size or to
`patch_height` and/or `patch_width`, if given. The patch's aspect ratio
is the dependent variable in this case, it will be computed from the
height and width. Any given values for `patch_aspect_ratio`,
`min_aspect_ratio`, or `max_aspect_ratio` will be ignored.
min_scale (float, optional): The minimum size of a dimension of the patch
as a fraction of the respective dimension of the image. Can be greater
than 1. For example, if the image width is 200 and `min_scale == 0.5`,
then the width of the generated patch will be at least 100. If `min_scale == 1.5`,
the width of the generated patch will be at least 300.
max_scale (float, optional): The maximum size of a dimension of the patch
as a fraction of the respective dimension of the image. Can be greater
than 1. For example, if the image width is 200 and `max_scale == 1.0`,
then the width of the generated patch will be at most 200. If `max_scale == 1.5`,
the width of the generated patch will be at most 300. Must be greater than
`min_scale`.
scale_uniformly (bool, optional): If `True` and if `must_match == 'h_w'`,
the patch height and width will be scaled uniformly, otherwise they will
be scaled independently.
min_aspect_ratio (float, optional): Determines the minimum aspect ratio
for the generated patches.
max_aspect_ratio (float, optional): Determines the maximum aspect ratio
for the generated patches.
patch_ymin (int, optional): `None` or the vertical coordinate of the top left
corner of the generated patches. If this is not `None`, the position of the
patches along the vertical axis is fixed. If this is `None`, then the
vertical position of generated patches will be chosen randomly such that
the overlap of a patch and the image along the vertical dimension is
always maximal.
patch_xmin (int, optional): `None` or the horizontal coordinate of the top left
corner of the generated patches. If this is not `None`, the position of the
patches along the horizontal axis is fixed. If this is `None`, then the
horizontal position of generated patches will be chosen randomly such that
the overlap of a patch and the image along the horizontal dimension is
always maximal.
patch_height (int, optional): `None` or the fixed height of the generated patches.
patch_width (int, optional): `None` or the fixed width of the generated patches.
patch_aspect_ratio (float, optional): `None` or the fixed aspect ratio of the
generated patches.
'''
        if must_match not in {'h_w', 'h_ar', 'w_ar'}:
            raise ValueError("`must_match` must be one of 'h_w', 'h_ar', or 'w_ar'.")
if scale_uniformly and not ((patch_height is None) and (patch_width is None)):
raise ValueError("If `scale_uniformly == True`, `patch_height` and `patch_width` must both be `None`.")
self.img_height = img_height
self.img_width = img_width
self.must_match = must_match
self.min_scale = min_scale
self.max_scale = max_scale
self.scale_uniformly = scale_uniformly
self.min_aspect_ratio = min_aspect_ratio
self.max_aspect_ratio = max_aspect_ratio
self.patch_ymin = patch_ymin
self.patch_xmin = patch_xmin
self.patch_height = patch_height
self.patch_width = patch_width
self.patch_aspect_ratio = patch_aspect_ratio
def __call__(self):
'''
Returns:
A 4-tuple `(ymin, xmin, height, width)` that represents the coordinates
of the generated patch.
'''
# Get the patch height and width.
if self.must_match == 'h_w': # Aspect is the dependent variable.
if not self.scale_uniformly:
# Get the height.
if self.patch_height is None:
patch_height = int(np.random.uniform(self.min_scale, self.max_scale) * self.img_height)
else:
patch_height = self.patch_height
# Get the width.
if self.patch_width is None:
patch_width = int(np.random.uniform(self.min_scale, self.max_scale) * self.img_width)
else:
patch_width = self.patch_width
else:
scaling_factor = np.random.uniform(self.min_scale, self.max_scale)
patch_height = int(scaling_factor * self.img_height)
patch_width = int(scaling_factor * self.img_width)
elif self.must_match == 'h_ar': # Width is the dependent variable.
# Get the height.
if self.patch_height is None:
patch_height = int(np.random.uniform(self.min_scale, self.max_scale) * self.img_height)
else:
patch_height = self.patch_height
# Get the aspect ratio.
if self.patch_aspect_ratio is None:
patch_aspect_ratio = np.random.uniform(self.min_aspect_ratio, self.max_aspect_ratio)
else:
patch_aspect_ratio = self.patch_aspect_ratio
# Get the width.
patch_width = int(patch_height * patch_aspect_ratio)
elif self.must_match == 'w_ar': # Height is the dependent variable.
# Get the width.
if self.patch_width is None:
patch_width = int(np.random.uniform(self.min_scale, self.max_scale) * self.img_width)
else:
patch_width = self.patch_width
# Get the aspect ratio.
if self.patch_aspect_ratio is None:
patch_aspect_ratio = np.random.uniform(self.min_aspect_ratio, self.max_aspect_ratio)
else:
patch_aspect_ratio = self.patch_aspect_ratio
# Get the height.
patch_height = int(patch_width / patch_aspect_ratio)
# Get the top left corner coordinates of the patch.
if self.patch_ymin is None:
# Compute how much room we have along the vertical axis to place the patch.
# A negative number here means that we want to sample a patch that is larger than the original image
# in the vertical dimension, in which case the patch will be placed such that it fully contains the
# image in the vertical dimension.
y_range = self.img_height - patch_height
# Select a random top left corner for the sample position from the possible positions.
if y_range >= 0: patch_ymin = np.random.randint(0, y_range + 1) # There are y_range + 1 possible positions for the crop in the vertical dimension.
else: patch_ymin = np.random.randint(y_range, 1) # The possible positions for the image on the background canvas in the vertical dimension.
else:
patch_ymin = self.patch_ymin
if self.patch_xmin is None:
# Compute how much room we have along the horizontal axis to place the patch.
# A negative number here means that we want to sample a patch that is larger than the original image
# in the horizontal dimension, in which case the patch will be placed such that it fully contains the
# image in the horizontal dimension.
x_range = self.img_width - patch_width
# Select a random top left corner for the sample position from the possible positions.
if x_range >= 0: patch_xmin = np.random.randint(0, x_range + 1) # There are x_range + 1 possible positions for the crop in the horizontal dimension.
else: patch_xmin = np.random.randint(x_range, 1) # The possible positions for the image on the background canvas in the horizontal dimension.
else:
patch_xmin = self.patch_xmin
return (patch_ymin, patch_xmin, patch_height, patch_width)
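# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_patch_coordinate_generator():
    '''Minimal usage sketch for `PatchCoordinateGenerator` in 'h_w' mode.'''
    gen = PatchCoordinateGenerator(img_height=300, img_width=400,
                                   must_match='h_w',
                                   min_scale=0.5, max_scale=1.0)
    ymin, xmin, height, width = gen()
    # Patch dims are 50%-100% of the image dims; the patch lies inside the image.
    assert 150 <= height <= 300 and 200 <= width <= 400
    assert 0 <= ymin <= 300 - height and 0 <= xmin <= 400 - width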
class CropPad:
'''
Crops and/or pads an image deterministically.
Depending on the given output patch size and the position (top left corner) relative
to the input image, the image will be cropped and/or padded along one or both spatial
dimensions.
For example, if the output patch lies entirely within the input image, this will result
in a regular crop. If the input image lies entirely within the output patch, this will
result in the image being padded in every direction. All other cases are mixed cases
where the image might be cropped in some directions and padded in others.
The output patch can be arbitrary in both size and position as long as it overlaps
with the input image.
'''
def __init__(self,
patch_ymin,
patch_xmin,
patch_height,
patch_width,
clip_boxes=True,
box_filter=None,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
patch_ymin (int, optional): The vertical coordinate of the top left corner of the output
patch relative to the image coordinate system. Can be negative (i.e. lie outside the image)
as long as the resulting patch still overlaps with the image.
            patch_xmin (int, optional): The horizontal coordinate of the top left corner of the output
patch relative to the image coordinate system. Can be negative (i.e. lie outside the image)
as long as the resulting patch still overlaps with the image.
patch_height (int): The height of the patch to be sampled from the image. Can be greater
than the height of the input image.
patch_width (int): The width of the patch to be sampled from the image. Can be greater
than the width of the input image.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
sampled patch.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the potential
background pixels of the scaled images. In the case of single-channel images,
the first element of `background` will be used as the background pixel value.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
#if (patch_height <= 0) or (patch_width <= 0):
# raise ValueError("Patch height and width must both be positive.")
#if (patch_ymin + patch_height < 0) or (patch_xmin + patch_width < 0):
# raise ValueError("A patch with the given coordinates cannot overlap with an input image.")
if not (isinstance(box_filter, BoxFilter) or box_filter is None):
raise ValueError("`box_filter` must be either `None` or a `BoxFilter` object.")
self.patch_height = patch_height
self.patch_width = patch_width
self.patch_ymin = patch_ymin
self.patch_xmin = patch_xmin
self.clip_boxes = clip_boxes
self.box_filter = box_filter
self.background = background
self.labels_format = labels_format
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
if (self.patch_ymin > img_height) or (self.patch_xmin > img_width):
raise ValueError("The given patch doesn't overlap with the input image.")
        # Guard against `None`: `np.copy(None)` yields a 0-d object array that
        # would no longer compare `is None` below.
        if labels is not None:
            labels = np.copy(labels)
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Top left corner of the patch relative to the image coordinate system:
patch_ymin = self.patch_ymin
patch_xmin = self.patch_xmin
# Create a canvas of the size of the patch we want to end up with.
if image.ndim == 3:
canvas = np.zeros(shape=(self.patch_height, self.patch_width, 3), dtype=np.uint8)
canvas[:, :] = self.background
elif image.ndim == 2:
canvas = np.zeros(shape=(self.patch_height, self.patch_width), dtype=np.uint8)
canvas[:, :] = self.background[0]
# Perform the crop.
if patch_ymin < 0 and patch_xmin < 0: # Pad the image at the top and on the left.
image_crop_height = min(img_height, self.patch_height + patch_ymin) # The number of pixels of the image that will end up on the canvas in the vertical direction.
image_crop_width = min(img_width, self.patch_width + patch_xmin) # The number of pixels of the image that will end up on the canvas in the horizontal direction.
canvas[-patch_ymin:-patch_ymin + image_crop_height, -patch_xmin:-patch_xmin + image_crop_width] = image[:image_crop_height, :image_crop_width]
elif patch_ymin < 0 and patch_xmin >= 0: # Pad the image at the top and crop it on the left.
image_crop_height = min(img_height, self.patch_height + patch_ymin) # The number of pixels of the image that will end up on the canvas in the vertical direction.
image_crop_width = min(self.patch_width, img_width - patch_xmin) # The number of pixels of the image that will end up on the canvas in the horizontal direction.
canvas[-patch_ymin:-patch_ymin + image_crop_height, :image_crop_width] = image[:image_crop_height, patch_xmin:patch_xmin + image_crop_width]
elif patch_ymin >= 0 and patch_xmin < 0: # Crop the image at the top and pad it on the left.
image_crop_height = min(self.patch_height, img_height - patch_ymin) # The number of pixels of the image that will end up on the canvas in the vertical direction.
image_crop_width = min(img_width, self.patch_width + patch_xmin) # The number of pixels of the image that will end up on the canvas in the horizontal direction.
canvas[:image_crop_height, -patch_xmin:-patch_xmin + image_crop_width] = image[patch_ymin:patch_ymin + image_crop_height, :image_crop_width]
elif patch_ymin >= 0 and patch_xmin >= 0: # Crop the image at the top and on the left.
image_crop_height = min(self.patch_height, img_height - patch_ymin) # The number of pixels of the image that will end up on the canvas in the vertical direction.
image_crop_width = min(self.patch_width, img_width - patch_xmin) # The number of pixels of the image that will end up on the canvas in the horizontal direction.
image_cropped = image[patch_ymin:patch_ymin + image_crop_height, patch_xmin:patch_xmin + image_crop_width]
canvas[:image_crop_height, :image_crop_width] = image_cropped
image = canvas
if return_inverter:
def inverter(labels):
labels = np.copy(labels)
labels[:, [ymin+1, ymax+1]] += patch_ymin
labels[:, [xmin+1, xmax+1]] += patch_xmin
return labels
if not (labels is None):
# Translate the box coordinates to the patch's coordinate system.
labels[:, [ymin, ymax]] -= patch_ymin
labels[:, [xmin, xmax]] -= patch_xmin
# Compute all valid boxes for this patch.
if not (self.box_filter is None):
self.box_filter.labels_format = self.labels_format
labels = self.box_filter(labels=labels,
image_height=self.patch_height,
image_width=self.patch_width)
if self.clip_boxes:
labels[:,[ymin,ymax]] = np.clip(labels[:,[ymin,ymax]], a_min=0, a_max=self.patch_height-1)
labels[:,[xmin,xmax]] = np.clip(labels[:,[xmin,xmax]], a_min=0, a_max=self.patch_width-1)
if return_inverter:
return image, labels, inverter
else:
return image, labels
else:
if return_inverter:
return image, inverter
else:
return image
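# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_crop_pad():
    '''Minimal usage sketch for `CropPad`: a negative top-left corner pads the
    image onto a larger background canvas.'''
    crop_pad = CropPad(patch_ymin=-10, patch_xmin=-10,
                       patch_height=120, patch_width=120,
                       background=(255, 255, 255))
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    patch = crop_pad(image)
    # The 100x100 image sits at offset (10, 10) on a 120x120 white canvas.
    assert patch.shape[:2] == (120, 120)
    assert patch[0, 0, 0] == 255 and patch[10, 10, 0] == 0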
class Crop:
'''
Crops off the specified numbers of pixels from the borders of images.
This is just a convenience interface for `CropPad`.
'''
def __init__(self,
crop_top,
crop_bottom,
crop_left,
crop_right,
clip_boxes=True,
box_filter=None,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
self.crop_top = crop_top
self.crop_bottom = crop_bottom
self.crop_left = crop_left
self.crop_right = crop_right
self.clip_boxes = clip_boxes
self.box_filter = box_filter
self.labels_format = labels_format
self.crop = CropPad(patch_ymin=self.crop_top,
patch_xmin=self.crop_left,
patch_height=None,
patch_width=None,
clip_boxes=self.clip_boxes,
box_filter=self.box_filter,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
self.crop.patch_height = img_height - self.crop_top - self.crop_bottom
self.crop.patch_width = img_width - self.crop_left - self.crop_right
self.crop.labels_format = self.labels_format
return self.crop(image, labels, return_inverter)
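# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_crop():
    '''Minimal usage sketch for `Crop`: trim 10 pixels off every border.'''
    crop = Crop(crop_top=10, crop_bottom=10, crop_left=10, crop_right=10)
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    cropped = crop(image)
    assert cropped.shape[:2] == (80, 80)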
class Pad:
'''
Pads images by the specified numbers of pixels on each side.
This is just a convenience interface for `CropPad`.
'''
def __init__(self,
pad_top,
pad_bottom,
pad_left,
pad_right,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
self.pad_top = pad_top
self.pad_bottom = pad_bottom
self.pad_left = pad_left
self.pad_right = pad_right
self.background = background
self.labels_format = labels_format
self.pad = CropPad(patch_ymin=-self.pad_top,
patch_xmin=-self.pad_left,
patch_height=None,
patch_width=None,
clip_boxes=False,
box_filter=None,
background=self.background,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
self.pad.patch_height = img_height + self.pad_top + self.pad_bottom
self.pad.patch_width = img_width + self.pad_left + self.pad_right
self.pad.labels_format = self.labels_format
return self.pad(image, labels, return_inverter)
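# Illustrative sketch (editor's addition, hypothetical helper name).
def _demo_pad():
    '''Minimal usage sketch for `Pad`: add a 5-pixel gray border on every side.'''
    pad = Pad(pad_top=5, pad_bottom=5, pad_left=5, pad_right=5,
              background=(128, 128, 128))
    image = np.zeros((100, 100, 3), dtype=np.uint8)
    padded = pad(image)
    assert padded.shape[:2] == (110, 110)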
class RandomPatch:
'''
Randomly samples a patch from an image. The randomness refers to whatever
randomness may be introduced by the patch coordinate generator, the box filter,
and the patch validator.
Input images may be cropped and/or padded along either or both of the two
spatial dimensions as necessary in order to obtain the required patch.
As opposed to `RandomPatchInf`, it is possible for this transform to fail to produce
an output image at all, in which case it will return `None`. This is useful, because
if this transform is used to generate patches of a fixed size or aspect ratio, then
the caller needs to be able to rely on the output image satisfying the set size or
aspect ratio. It might therefore not be an option to return the unaltered input image
as other random transforms do when they fail to produce a valid transformed image.
'''
def __init__(self,
patch_coord_generator,
box_filter=None,
image_validator=None,
n_trials_max=3,
clip_boxes=True,
prob=1.0,
background=(0,0,0),
can_fail=False,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
patch_coord_generator (PatchCoordinateGenerator): A `PatchCoordinateGenerator` object
to generate the positions and sizes of the patches to be sampled from the input images.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
image_validator (ImageValidator, optional): Only relevant if ground truth bounding boxes are given.
An `ImageValidator` object to determine whether a sampled patch is valid. If `None`,
any outcome is valid.
n_trials_max (int, optional): Only relevant if ground truth bounding boxes are given.
                Determines the maximal number of trials to sample a valid patch. If no valid patch could
be sampled in `n_trials_max` trials, returns one `None` in place of each regular output.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
sampled patch.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the potential
background pixels of the scaled images. In the case of single-channel images,
the first element of `background` will be used as the background pixel value.
can_fail (bool, optional): If `True`, will return `None` if no valid patch could be found after
`n_trials_max` trials. If `False`, will return the unaltered input image in such a case.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if not isinstance(patch_coord_generator, PatchCoordinateGenerator):
raise ValueError("`patch_coord_generator` must be an instance of `PatchCoordinateGenerator`.")
if not (isinstance(image_validator, ImageValidator) or image_validator is None):
raise ValueError("`image_validator` must be either `None` or an `ImageValidator` object.")
self.patch_coord_generator = patch_coord_generator
self.box_filter = box_filter
self.image_validator = image_validator
self.n_trials_max = n_trials_max
self.clip_boxes = clip_boxes
self.prob = prob
self.background = background
self.can_fail = can_fail
self.labels_format = labels_format
self.sample_patch = CropPad(patch_ymin=None,
patch_xmin=None,
patch_height=None,
patch_width=None,
clip_boxes=self.clip_boxes,
box_filter=self.box_filter,
background=self.background,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
img_height, img_width = image.shape[:2]
self.patch_coord_generator.img_height = img_height
self.patch_coord_generator.img_width = img_width
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Override the preset labels format.
if not self.image_validator is None:
self.image_validator.labels_format = self.labels_format
self.sample_patch.labels_format = self.labels_format
for _ in range(max(1, self.n_trials_max)):
# Generate patch coordinates.
patch_ymin, patch_xmin, patch_height, patch_width = self.patch_coord_generator()
self.sample_patch.patch_ymin = patch_ymin
self.sample_patch.patch_xmin = patch_xmin
self.sample_patch.patch_height = patch_height
self.sample_patch.patch_width = patch_width
if (labels is None) or (self.image_validator is None):
# We either don't have any boxes or if we do, we will accept any outcome as valid.
return self.sample_patch(image, labels, return_inverter)
else:
# Translate the box coordinates to the patch's coordinate system.
new_labels = np.copy(labels)
new_labels[:, [ymin, ymax]] -= patch_ymin
new_labels[:, [xmin, xmax]] -= patch_xmin
# Check if the patch is valid.
if self.image_validator(labels=new_labels,
image_height=patch_height,
image_width=patch_width):
return self.sample_patch(image, labels, return_inverter)
# If we weren't able to sample a valid patch...
if self.can_fail:
# ...return `None`.
if labels is None:
if return_inverter:
return None, None
else:
return None
else:
if return_inverter:
return None, None, None
else:
return None, None
else:
# ...return the unaltered input image.
if labels is None:
if return_inverter:
return image, None
else:
return image
else:
if return_inverter:
return image, labels, None
else:
return image, labels
else:
if return_inverter:
def inverter(labels):
return labels
if labels is None:
if return_inverter:
return image, inverter
else:
return image
else:
if return_inverter:
return image, labels, inverter
else:
return image, labels
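# Illustrative usage sketch for `RandomPatch`. Assumes `image` is an HxWxC
# numpy array and `labels` is an (n_boxes, 5) array laid out as in
# `labels_format`; the generator below reuses the fixed-size
# `PatchCoordinateGenerator` signature used further down in this file:
#
#   patch_gen = PatchCoordinateGenerator(img_height=image.shape[0],
#                                        img_width=image.shape[1],
#                                        must_match='h_w',
#                                        patch_height=300,
#                                        patch_width=300)
#   sampler = RandomPatch(patch_coord_generator=patch_gen,
#                         n_trials_max=3,
#                         prob=1.0)
#   patched_image, patched_labels = sampler(image, labels)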
class RandomPatchInf:
'''
Randomly samples a patch from an image. The randomness refers to whatever
randomness may be introduced by the patch coordinate generator, the box filter,
and the patch validator.
Input images may be cropped and/or padded along either or both of the two
spatial dimensions as necessary in order to obtain the required patch.
This operation is very similar to `RandomPatch`, except that:
1. This operation runs indefinitely until either a valid patch is found or
the input image is returned unaltered, i.e. it cannot fail.
2. If a bound generator is given, a new pair of bounds will be generated
every `n_trials_max` iterations.
'''
def __init__(self,
patch_coord_generator,
box_filter=None,
image_validator=None,
bound_generator=None,
n_trials_max=50,
clip_boxes=True,
prob=0.857,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
patch_coord_generator (PatchCoordinateGenerator): A `PatchCoordinateGenerator` object
to generate the positions and sizes of the patches to be sampled from the input images.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
image_validator (ImageValidator, optional): Only relevant if ground truth bounding boxes are given.
An `ImageValidator` object to determine whether a sampled patch is valid. If `None`,
any outcome is valid.
bound_generator (BoundGenerator, optional): A `BoundGenerator` object to generate upper and
lower bound values for the patch validator. Every `n_trials_max` trials, a new pair of
upper and lower bounds will be generated until a valid patch is found or the original image
is returned. This bound generator overrides the bound generator of the patch validator.
n_trials_max (int, optional): Only relevant if ground truth bounding boxes are given.
The sampler will run indefinitely until either a valid patch is found or the original image
                is returned, but this determines the maximal number of trials to sample a valid patch for each
selected pair of lower and upper bounds before a new pair is picked.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
sampled patch.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the potential
background pixels of the scaled images. In the case of single-channel images,
the first element of `background` will be used as the background pixel value.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
if not isinstance(patch_coord_generator, PatchCoordinateGenerator):
raise ValueError("`patch_coord_generator` must be an instance of `PatchCoordinateGenerator`.")
if not (isinstance(image_validator, ImageValidator) or image_validator is None):
raise ValueError("`image_validator` must be either `None` or an `ImageValidator` object.")
if not (isinstance(bound_generator, BoundGenerator) or bound_generator is None):
raise ValueError("`bound_generator` must be either `None` or a `BoundGenerator` object.")
self.patch_coord_generator = patch_coord_generator
self.box_filter = box_filter
self.image_validator = image_validator
self.bound_generator = bound_generator
self.n_trials_max = n_trials_max
self.clip_boxes = clip_boxes
self.prob = prob
self.background = background
self.labels_format = labels_format
self.sample_patch = CropPad(patch_ymin=None,
patch_xmin=None,
patch_height=None,
patch_width=None,
clip_boxes=self.clip_boxes,
box_filter=self.box_filter,
background=self.background,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
self.patch_coord_generator.img_height = img_height
self.patch_coord_generator.img_width = img_width
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Override the preset labels format.
        if self.image_validator is not None:
self.image_validator.labels_format = self.labels_format
self.sample_patch.labels_format = self.labels_format
while True: # Keep going until we either find a valid patch or return the original image.
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
# In case we have a bound generator, pick a lower and upper bound for the patch validator.
if not ((self.image_validator is None) or (self.bound_generator is None)):
self.image_validator.bounds = self.bound_generator()
# Use at most `self.n_trials_max` attempts to find a crop
# that meets our requirements.
for _ in range(max(1, self.n_trials_max)):
# Generate patch coordinates.
patch_ymin, patch_xmin, patch_height, patch_width = self.patch_coord_generator()
self.sample_patch.patch_ymin = patch_ymin
self.sample_patch.patch_xmin = patch_xmin
self.sample_patch.patch_height = patch_height
self.sample_patch.patch_width = patch_width
# Check if the resulting patch meets the aspect ratio requirements.
aspect_ratio = patch_width / patch_height
if not (self.patch_coord_generator.min_aspect_ratio <= aspect_ratio <= self.patch_coord_generator.max_aspect_ratio):
continue
if (labels is None) or (self.image_validator is None):
                        # Either there are no boxes, or any outcome counts as valid.
return self.sample_patch(image, labels, return_inverter)
else:
# Translate the box coordinates to the patch's coordinate system.
new_labels = np.copy(labels)
new_labels[:, [ymin, ymax]] -= patch_ymin
new_labels[:, [xmin, xmax]] -= patch_xmin
# Check if the patch contains the minimum number of boxes we require.
if self.image_validator(labels=new_labels,
image_height=patch_height,
image_width=patch_width):
return self.sample_patch(image, labels, return_inverter)
else:
if return_inverter:
def inverter(labels):
return labels
if labels is None:
if return_inverter:
return image, inverter
else:
return image
else:
if return_inverter:
return image, labels, inverter
else:
return image, labels
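# Illustrative usage sketch for `RandomPatchInf`; `patch_gen`, `validator` and
# `bounds_gen` are assumed to be configured `PatchCoordinateGenerator`,
# `ImageValidator` and `BoundGenerator` objects. Unlike `RandomPatch`, this
# sampler cannot fail: it loops until a valid patch is found or the unaltered
# input is returned:
#
#   sampler = RandomPatchInf(patch_coord_generator=patch_gen,
#                            image_validator=validator,
#                            bound_generator=bounds_gen,
#                            n_trials_max=50)
#   patched_image, patched_labels = sampler(image, labels)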
class RandomMaxCropFixedAR:
'''
Crops the largest possible patch of a given fixed aspect ratio
from an image.
Since the aspect ratio of the sampled patches is constant, they
can subsequently be resized to the same size without distortion.
'''
def __init__(self,
patch_aspect_ratio,
box_filter=None,
image_validator=None,
n_trials_max=3,
clip_boxes=True,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
patch_aspect_ratio (float): The fixed aspect ratio that all sampled patches will have.
box_filter (BoxFilter, optional): Only relevant if ground truth bounding boxes are given.
A `BoxFilter` object to filter out bounding boxes that don't meet the given criteria
after the transformation. Refer to the `BoxFilter` documentation for details. If `None`,
the validity of the bounding boxes is not checked.
image_validator (ImageValidator, optional): Only relevant if ground truth bounding boxes are given.
An `ImageValidator` object to determine whether a sampled patch is valid. If `None`,
any outcome is valid.
n_trials_max (int, optional): Only relevant if ground truth bounding boxes are given.
                Determines the maximal number of trials to sample a valid patch. If no valid patch could
be sampled in `n_trials_max` trials, returns `None`.
clip_boxes (bool, optional): Only relevant if ground truth bounding boxes are given.
If `True`, any ground truth bounding boxes will be clipped to lie entirely within the
sampled patch.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
self.patch_aspect_ratio = patch_aspect_ratio
self.box_filter = box_filter
self.image_validator = image_validator
self.n_trials_max = n_trials_max
self.clip_boxes = clip_boxes
self.labels_format = labels_format
self.random_patch = RandomPatch(patch_coord_generator=PatchCoordinateGenerator(), # Just a dummy object
box_filter=self.box_filter,
image_validator=self.image_validator,
n_trials_max=self.n_trials_max,
clip_boxes=self.clip_boxes,
prob=1.0,
can_fail=False,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
# The ratio of the input image aspect ratio and patch aspect ratio determines the maximal possible crop.
image_aspect_ratio = img_width / img_height
if image_aspect_ratio < self.patch_aspect_ratio:
patch_width = img_width
patch_height = int(round(patch_width / self.patch_aspect_ratio))
else:
patch_height = img_height
patch_width = int(round(patch_height * self.patch_aspect_ratio))
# Now that we know the desired height and width for the patch,
# instantiate an appropriate patch coordinate generator.
patch_coord_generator = PatchCoordinateGenerator(img_height=img_height,
img_width=img_width,
must_match='h_w',
patch_height=patch_height,
patch_width=patch_width)
# The rest of the work is done by `RandomPatch`.
self.random_patch.patch_coord_generator = patch_coord_generator
self.random_patch.labels_format = self.labels_format
return self.random_patch(image, labels, return_inverter)
class RandomPadFixedAR:
'''
Adds the minimal possible padding to an image that results in a patch
of the given fixed aspect ratio that contains the entire image.
Since the aspect ratio of the resulting images is constant, they
can subsequently be resized to the same size without distortion.
'''
def __init__(self,
patch_aspect_ratio,
background=(0,0,0),
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4}):
'''
Arguments:
patch_aspect_ratio (float): The fixed aspect ratio that all sampled patches will have.
background (list/tuple, optional): A 3-tuple specifying the RGB color value of the potential
background pixels of the scaled images. In the case of single-channel images,
the first element of `background` will be used as the background pixel value.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
'''
self.patch_aspect_ratio = patch_aspect_ratio
self.background = background
self.labels_format = labels_format
self.random_patch = RandomPatch(patch_coord_generator=PatchCoordinateGenerator(), # Just a dummy object
box_filter=None,
image_validator=None,
n_trials_max=1,
clip_boxes=False,
background=self.background,
prob=1.0,
labels_format=self.labels_format)
def __call__(self, image, labels=None, return_inverter=False):
img_height, img_width = image.shape[:2]
if img_width < img_height:
patch_height = img_height
patch_width = int(round(patch_height * self.patch_aspect_ratio))
else:
patch_width = img_width
patch_height = int(round(patch_width / self.patch_aspect_ratio))
# Now that we know the desired height and width for the patch,
# instantiate an appropriate patch coordinate generator.
patch_coord_generator = PatchCoordinateGenerator(img_height=img_height,
img_width=img_width,
must_match='h_w',
patch_height=patch_height,
patch_width=patch_width)
# The rest of the work is done by `RandomPatch`.
self.random_patch.patch_coord_generator = patch_coord_generator
self.random_patch.labels_format = self.labels_format
return self.random_patch(image, labels, return_inverter)
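# Illustrative usage sketch for `RandomPadFixedAR`; `image` is assumed to be
# an HxWxC numpy array. Padding a 200x300 image to aspect ratio 1.0 yields a
# 300x300 image whose added rows are filled with `background`:
#
#   pad_to_square = RandomPadFixedAR(patch_aspect_ratio=1.0)
#   padded_image = pad_to_square(image)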
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/object_detection_2d_patch_sampling_ops.py |
'''
Various photometric image transformations, both deterministic and probabilistic.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
import cv2
class ConvertColor:
'''
Converts images between RGB, HSV and grayscale color spaces. This is just a wrapper
around `cv2.cvtColor()`.
'''
def __init__(self, current='RGB', to='HSV', keep_3ch=True):
'''
Arguments:
current (str, optional): The current color space of the images. Can be
one of 'RGB' and 'HSV'.
to (str, optional): The target color space of the images. Can be one of
'RGB', 'HSV', and 'GRAY'.
keep_3ch (bool, optional): Only relevant if `to == GRAY`.
If `True`, the resulting grayscale images will have three channels.
'''
if not ((current in {'RGB', 'HSV'}) and (to in {'RGB', 'HSV', 'GRAY'})):
raise NotImplementedError
self.current = current
self.to = to
self.keep_3ch = keep_3ch
def __call__(self, image, labels=None):
if self.current == 'RGB' and self.to == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif self.current == 'RGB' and self.to == 'GRAY':
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
if self.keep_3ch:
image = np.stack([image] * 3, axis=-1)
elif self.current == 'HSV' and self.to == 'RGB':
image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
        elif self.current == 'HSV' and self.to == 'GRAY':
            # OpenCV has no direct HSV-to-grayscale conversion, so go through RGB.
            image = cv2.cvtColor(image, cv2.COLOR_HSV2RGB)
            image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
if self.keep_3ch:
image = np.stack([image] * 3, axis=-1)
if labels is None:
return image
else:
return image, labels
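# Illustrative usage sketch for `ConvertColor`; `image` is assumed to be an
# HxWx3 RGB numpy array:
#
#   to_hsv = ConvertColor(current='RGB', to='HSV')
#   to_rgb = ConvertColor(current='HSV', to='RGB')
#   hsv_image = to_hsv(image)
#   rgb_image = to_rgb(hsv_image)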
class ConvertDataType:
'''
Converts images represented as Numpy arrays between `uint8` and `float32`.
Serves as a helper for certain photometric distortions. This is just a wrapper
around `np.ndarray.astype()`.
'''
def __init__(self, to='uint8'):
'''
Arguments:
to (string, optional): To which datatype to convert the input images.
Can be either of 'uint8' and 'float32'.
'''
if not (to == 'uint8' or to == 'float32'):
raise ValueError("`to` can be either of 'uint8' or 'float32'.")
self.to = to
def __call__(self, image, labels=None):
if self.to == 'uint8':
image = np.round(image, decimals=0).astype(np.uint8)
else:
image = image.astype(np.float32)
if labels is None:
return image
else:
return image, labels
class ConvertTo3Channels:
'''
Converts 1-channel and 4-channel images to 3-channel images. Does nothing to images that
already have 3 channels. In the case of 4-channel images, the fourth channel will be
discarded.
'''
def __init__(self):
pass
def __call__(self, image, labels=None):
if image.ndim == 2:
image = np.stack([image] * 3, axis=-1)
elif image.ndim == 3:
if image.shape[2] == 1:
image = np.concatenate([image] * 3, axis=-1)
elif image.shape[2] == 4:
image = image[:,:,:3]
if labels is None:
return image
else:
return image, labels
class Hue:
'''
Changes the hue of HSV images.
Important:
- Expects HSV input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, delta):
'''
Arguments:
            delta (int or float): A number in the closed interval `[-180, 180]` that determines the hue change,
                where a change by `delta` means a change by `2 * delta` degrees. Read up on the HSV color format
if you need more information.
'''
if not (-180 <= delta <= 180): raise ValueError("`delta` must be in the closed interval `[-180, 180]`.")
self.delta = delta
def __call__(self, image, labels=None):
image[:, :, 0] = (image[:, :, 0] + self.delta) % 180.0
if labels is None:
return image
else:
return image, labels
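# Worked example of the hue wraparound above. The class assumes the
# hue-in-[0, 180) convention (hue = degrees / 2), so `delta` shifts the hue
# by `2 * delta` degrees. For delta = 10:
#
#   hue_channel = np.array([10., 175.])
#   (hue_channel + 10) % 180.0  # -> array([20., 5.]); 175 wraps around to 5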
class RandomHue:
'''
Randomly changes the hue of HSV images.
Important:
- Expects HSV input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, max_delta=18, prob=0.5):
'''
Arguments:
max_delta (int): An integer in the closed interval `[0, 180]` that determines the maximal absolute
hue change.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
'''
if not (0 <= max_delta <= 180): raise ValueError("`max_delta` must be in the closed interval `[0, 180]`.")
self.max_delta = max_delta
self.prob = prob
self.change_hue = Hue(delta=0)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
self.change_hue.delta = np.random.uniform(-self.max_delta, self.max_delta)
return self.change_hue(image, labels)
elif labels is None:
return image
else:
return image, labels
class Saturation:
'''
Changes the saturation of HSV images.
Important:
- Expects HSV input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, factor):
'''
Arguments:
factor (float): A float greater than zero that determines saturation change, where
values less than one result in less saturation and values greater than one result
in more saturation.
'''
        if factor <= 0.0: raise ValueError("`factor` must be greater than zero.")
self.factor = factor
def __call__(self, image, labels=None):
image[:,:,1] = np.clip(image[:,:,1] * self.factor, 0, 255)
if labels is None:
return image
else:
return image, labels
class RandomSaturation:
'''
Randomly changes the saturation of HSV images.
Important:
- Expects HSV input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, lower=0.3, upper=2.0, prob=0.5):
'''
Arguments:
lower (float, optional): A float greater than zero, the lower bound for the random
saturation change.
upper (float, optional): A float greater than zero, the upper bound for the random
saturation change. Must be greater than `lower`.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
'''
self.lower = lower
self.upper = upper
self.prob = prob
self.change_saturation = Saturation(factor=1.0)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
self.change_saturation.factor = np.random.uniform(self.lower, self.upper)
return self.change_saturation(image, labels)
elif labels is None:
return image
else:
return image, labels
class Brightness:
'''
Changes the brightness of RGB images.
Important:
- Expects RGB input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, delta):
'''
Arguments:
            delta (int or float): The amount to add to or subtract from the intensity
of every pixel.
'''
self.delta = delta
def __call__(self, image, labels=None):
image = np.clip(image + self.delta, 0, 255)
if labels is None:
return image
else:
return image, labels
class RandomBrightness:
'''
Randomly changes the brightness of RGB images.
Important:
- Expects RGB input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, lower=-84, upper=84, prob=0.5):
'''
Arguments:
lower (int, optional): An integer, the lower bound for the random brightness change.
upper (int, optional): An integer, the upper bound for the random brightness change.
Must be greater than `lower`.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
'''
self.lower = float(lower)
self.upper = float(upper)
self.prob = prob
self.change_brightness = Brightness(delta=0)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
self.change_brightness.delta = np.random.uniform(self.lower, self.upper)
return self.change_brightness(image, labels)
elif labels is None:
return image
else:
return image, labels
class Contrast:
'''
Changes the contrast of RGB images.
Important:
- Expects RGB input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, factor):
'''
Arguments:
factor (float): A float greater than zero that determines contrast change, where
values less than one result in less contrast and values greater than one result
in more contrast.
'''
        if factor <= 0.0: raise ValueError("`factor` must be greater than zero.")
self.factor = factor
def __call__(self, image, labels=None):
image = np.clip(127.5 + self.factor * (image - 127.5), 0, 255)
if labels is None:
return image
else:
return image, labels
class RandomContrast:
'''
Randomly changes the contrast of RGB images.
Important:
- Expects RGB input.
- Expects input array to be of `dtype` `float`.
'''
def __init__(self, lower=0.5, upper=1.5, prob=0.5):
'''
Arguments:
lower (float, optional): A float greater than zero, the lower bound for the random
contrast change.
upper (float, optional): A float greater than zero, the upper bound for the random
contrast change. Must be greater than `lower`.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
'''
self.lower = lower
self.upper = upper
self.prob = prob
self.change_contrast = Contrast(factor=1.0)
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
self.change_contrast.factor = np.random.uniform(self.lower, self.upper)
return self.change_contrast(image, labels)
elif labels is None:
return image
else:
return image, labels
class Gamma:
'''
Changes the gamma value of RGB images.
Important: Expects RGB input.
'''
def __init__(self, gamma):
'''
Arguments:
gamma (float): A float greater than zero that determines gamma change.
'''
        if gamma <= 0.0: raise ValueError("`gamma` must be greater than zero.")
self.gamma = gamma
self.gamma_inv = 1.0 / gamma
# Build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values.
self.table = np.array([((i / 255.0) ** self.gamma_inv) * 255 for i in np.arange(0, 256)]).astype("uint8")
def __call__(self, image, labels=None):
image = cv2.LUT(image, self.table)
if labels is None:
return image
else:
return image, labels
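# Worked example of one lookup-table entry above, for gamma = 2.0 (so
# gamma_inv = 0.5): an input pixel value of 64 maps to
#
#   ((64 / 255.0) ** 0.5) * 255  # ~= 127.7, stored as 127 by the uint8 cast
#
# i.e. darker pixels are brightened, the expected effect of this 1/gamma
# convention for gamma > 1.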
class RandomGamma:
'''
Randomly changes the gamma value of RGB images.
Important: Expects RGB input.
'''
def __init__(self, lower=0.25, upper=2.0, prob=0.5):
'''
Arguments:
lower (float, optional): A float greater than zero, the lower bound for the random
gamma change.
upper (float, optional): A float greater than zero, the upper bound for the random
gamma change. Must be greater than `lower`.
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
'''
self.lower = lower
self.upper = upper
self.prob = prob
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
gamma = np.random.uniform(self.lower, self.upper)
change_gamma = Gamma(gamma=gamma)
return change_gamma(image, labels)
elif labels is None:
return image
else:
return image, labels
class HistogramEqualization:
'''
Performs histogram equalization on HSV images.
    Important: Expects HSV input.
'''
def __init__(self):
pass
def __call__(self, image, labels=None):
image[:,:,2] = cv2.equalizeHist(image[:,:,2])
if labels is None:
return image
else:
return image, labels
class RandomHistogramEqualization:
'''
Randomly performs histogram equalization on HSV images. The randomness only refers
to whether or not the equalization is performed.
    Important: Expects HSV input.
'''
def __init__(self, prob=0.5):
'''
Arguments:
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
'''
self.prob = prob
self.equalize = HistogramEqualization()
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
return self.equalize(image, labels)
elif labels is None:
return image
else:
return image, labels
class ChannelSwap:
'''
Swaps the channels of images.
'''
def __init__(self, order):
'''
Arguments:
order (tuple): A tuple of integers that defines the desired channel order
of the input images after the channel swap.
'''
self.order = order
def __call__(self, image, labels=None):
image = image[:,:,self.order]
if labels is None:
return image
else:
return image, labels
class RandomChannelSwap:
'''
Randomly swaps the channels of RGB images.
Important: Expects RGB input.
'''
def __init__(self, prob=0.5):
'''
Arguments:
prob (float, optional): `(1 - prob)` determines the probability with which the original,
unaltered image is returned.
'''
self.prob = prob
# All possible permutations of the three image channels except the original order.
self.permutations = ((0, 2, 1),
(1, 0, 2), (1, 2, 0),
(2, 0, 1), (2, 1, 0))
self.swap_channels = ChannelSwap(order=(0, 1, 2))
def __call__(self, image, labels=None):
p = np.random.uniform(0,1)
if p >= (1.0-self.prob):
            i = np.random.randint(5)  # Pick one of the 5 non-identity permutations.
self.swap_channels.order = self.permutations[i]
return self.swap_channels(image, labels)
elif labels is None:
return image
else:
return image, labels
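# Illustrative sketch of chaining the ops above into a simple photometric
# pipeline. The list-and-loop pattern below is an assumption, not an API of
# this module; each op simply takes and returns `(image, labels)`. Note the
# dtype round-trips: the HSV ops expect float input, but the color-space
# conversions are done on `uint8` so that hue follows the 8-bit [0, 180)
# convention:
#
#   transformations = [ConvertDataType(to='float32'),
#                      RandomBrightness(lower=-32, upper=32, prob=0.5),
#                      RandomContrast(lower=0.5, upper=1.5, prob=0.5),
#                      ConvertDataType(to='uint8'),
#                      ConvertColor(current='RGB', to='HSV'),
#                      ConvertDataType(to='float32'),
#                      RandomHue(max_delta=18, prob=0.5),
#                      RandomSaturation(lower=0.5, upper=1.5, prob=0.5),
#                      ConvertDataType(to='uint8'),
#                      ConvertColor(current='HSV', to='RGB')]
#   for transform in transformations:
#       image, labels = transform(image, labels)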
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/builders/data_generator/object_detection_2d_photometric_ops.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unified eval and mAP callback."""
from multiprocessing import cpu_count
import sys
from keras import backend as K
from keras.utils.data_utils import OrderedEnqueuer
import numpy as np
from tqdm import trange
from nvidia_tao_tf1.cv.common.callbacks.detection_metric_callback import DetectionMetricCallback
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
class DetectionMetricCallbackBG(DetectionMetricCallback):
'''
    Callback function to calculate model mAP / validation loss every k epochs.
Args:
ap_evaluator: object of class APEvaluator.
built_eval_model: eval model built with additional layers for encoded output AND bbox
output (model requires two outputs!!!)
eval_sequence: Eval data sequence (based on keras sequence) that gives images, labels.
labels is list (batch_size) of tuples (encoded_label, raw_label)
loss_ops: three element tuple or list. [gt_placeholder, pred_placeholder, loss]
eval_model: the training graph part of built_eval_model. Note, this model must share
TF nodes with built_eval_model
        metric_interval: calculate model mAP every k epochs
        verbose: True to print per-class AP results.
'''
def __init__(self, ap_evaluator, built_eval_model, eval_sequence, loss_ops, *args, **kwargs):
"""Init function."""
super().__init__(ap_evaluator=ap_evaluator,
built_eval_model=built_eval_model,
eval_sequence=eval_sequence,
loss_ops=loss_ops,
*args, **kwargs)
self.ap_evaluator = ap_evaluator
self.built_eval_model = built_eval_model
self.classes = eval_sequence.classes
self.enqueuer = OrderedEnqueuer(eval_sequence, use_multiprocessing=False)
self.n_batches = len(eval_sequence)
self.loss_ops = loss_ops
self.output_height = eval_sequence.output_height
self.output_width = eval_sequence.output_width
def _skip_metric(self, logs):
for i in self.classes:
logs['AP_' + i] = np.float64(np.nan)
logs['mAP'] = np.float64(np.nan)
logs['validation_loss'] = np.float64(np.nan)
def _calc_metric(self, logs):
total_loss = 0.0
gt_labels = []
pred_labels = []
if self.verbose:
tr = trange(self.n_batches, file=sys.stdout)
tr.set_description('Producing predictions')
else:
tr = range(self.n_batches)
self.enqueuer.start(workers=max(cpu_count() - 1, 1), max_queue_size=20)
output_generator = self.enqueuer.get()
# Loop over all batches.
for _ in tr:
# Generate batch.
batch_X, batch_labs = next(output_generator)
encoded_lab, gt_lab = zip(*batch_labs)
# Predict.
y_pred_encoded, y_pred = self.built_eval_model.predict(batch_X)
batch_loss = K.get_session().run(self.loss_ops[2],
feed_dict={self.loss_ops[0]: np.array(encoded_lab),
self.loss_ops[1]: y_pred_encoded})
total_loss += np.sum(batch_loss) * len(gt_lab)
gt_labels.extend(gt_lab)
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > self.ap_evaluator.conf_thres]
y_pred_valid[..., 2] = np.clip(y_pred_valid[..., 2].round(), 0.0,
self.output_width)
y_pred_valid[..., 3] = np.clip(y_pred_valid[..., 3].round(), 0.0,
self.output_height)
y_pred_valid[..., 4] = np.clip(y_pred_valid[..., 4].round(), 0.0,
self.output_width)
y_pred_valid[..., 5] = np.clip(y_pred_valid[..., 5].round(), 0.0,
self.output_height)
pred_labels.append(y_pred_valid)
self.enqueuer.stop()
logs['validation_loss'] = total_loss / len(gt_labels)
        m_ap, ap = self.ap_evaluator(gt_labels, pred_labels, verbose=self.verbose)
        # Recompute the mean over foreground classes only (class index 0 is background).
        m_ap = np.mean(ap[1:])
if self.verbose:
print("*******************************")
for i in range(len(self.classes)):
logs['AP_' + self.classes[i]] = np.float64(ap[i+1])
if self.verbose:
print("{:<14}{:<6}{}".format(self.classes[i], 'AP', round(ap[i+1], 5)))
if self.verbose:
print("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 5)))
print("*******************************")
print("Validation loss:", logs['validation_loss'])
logs['mAP'] = m_ap
graphical_data = {
"validation loss": round(logs['validation_loss'], 8),
"mean average precision": round(logs['mAP'], 5)
}
s_logger = status_logging.get_status_logger()
if isinstance(s_logger, status_logging.StatusLogger):
s_logger.graphical = graphical_data
s_logger.write(
status_level=status_logging.Status.RUNNING,
message="Evaluation metrics generated."
)
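# Illustrative usage sketch; `ap_evaluator`, `built_eval_model`,
# `eval_sequence` and `loss_ops` are assumed to be built as described in the
# class docstring, and `metric_interval`/`verbose` are assumed to be accepted
# by the parent class:
#
#   metric_cb = DetectionMetricCallbackBG(ap_evaluator=ap_evaluator,
#                                         built_eval_model=built_eval_model,
#                                         eval_sequence=eval_sequence,
#                                         loss_ops=loss_ops,
#                                         metric_interval=1,
#                                         verbose=True)
#   model.fit(..., callbacks=[metric_cb])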
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/callbacks/detection_metric_callback_bg.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/callbacks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform continuous RetinaNet training on a tfrecords dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.callbacks import Callback
import numpy as np
from nvidia_tao_tf1.cv.common.inferencer.evaluator import Evaluator
from nvidia_tao_tf1.cv.common.utils import ap_mode_dict
from nvidia_tao_tf1.cv.ssd.builders import eval_builder
class ap_callback(Callback):
    '''Callback function to calculate model mAP every k epochs.'''
def __init__(self, val_dataset, every_k, im_width, im_height,
batch_size, classes, ap_mode, model_eval,
confidence_thresh, clustering_iou, top_k,
nms_max_output_size, matching_iou, verbose):
        '''Init function. `val_dataset` should yield unencoded labels.'''
self.val_dataset = val_dataset
self.im_width = im_width
self.im_height = im_height
self.classes = classes
self.ap_mode = ap_mode
self.model_eval = model_eval
self.every_k = every_k if every_k > 0 else 5
self.batch_size = batch_size if batch_size > 0 else 32
self.confidence_thresh = confidence_thresh if confidence_thresh > 0 else 0.05
self.clustering_iou = clustering_iou if clustering_iou > 0 else 0.5
self.top_k = top_k if top_k > 0 else 200
self.nms_max_output_size = nms_max_output_size if nms_max_output_size else 1000
self.matching_iou = matching_iou if matching_iou > 0 else 0.5
self.verbose = verbose
def on_epoch_end(self, epoch, logs):
'''evaluates on epoch end.'''
if (epoch + 1) % self.every_k != 0:
for i in self.classes:
logs['AP_' + i] = np.nan
logs['mAP'] = np.nan
return
K.set_learning_phase(0)
eval_model = eval_builder.build(self.model_eval,
self.confidence_thresh,
self.clustering_iou,
self.top_k,
self.nms_max_output_size)
evaluator = Evaluator(keras_model=eval_model,
n_classes=len(self.classes),
batch_size=self.batch_size,
infer_process_fn=(lambda inf, y: y))
results = evaluator(data_generator=self.val_dataset,
matching_iou_threshold=self.matching_iou,
num_recall_points=11,
average_precision_mode=ap_mode_dict[self.ap_mode],
verbose=self.verbose)
mean_average_precision, average_precisions = results
K.set_learning_phase(1)
if self.verbose:
print("*******************************")
for i in range(len(average_precisions)):
logs['AP_'+self.classes[i]] = average_precisions[i]
if self.verbose:
print("{:<14}{:<6}{}".format(self.classes[i], 'AP',
round(average_precisions[i], 3)))
if self.verbose:
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))
print("*******************************")
logs['mAP'] = mean_average_precision
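# Illustrative usage sketch; all values below are hypothetical, and
# `model_eval` is assumed to be the evaluation graph of the model being
# trained:
#
#   map_cb = ap_callback(val_dataset, every_k=5, im_width=300, im_height=300,
#                        batch_size=32, classes=class_names, ap_mode='sample',
#                        model_eval=model_eval, confidence_thresh=0.05,
#                        clustering_iou=0.5, top_k=200,
#                        nms_max_output_size=1000, matching_iou=0.5,
#                        verbose=True)
#   model.fit(..., callbacks=[map_cb])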
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/callbacks/ap_callback.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encrypted model saver callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.callbacks import Callback
from nvidia_tao_tf1.cv.ssd.utils.model_io import save_model
class KerasModelSaver(Callback):
"""Save the encrypted model after every epoch.
Attributes:
        filepath: format string for saving models
ENC_KEY: API key to encrypt the model.
"""
def __init__(self, filepath, key, save_model, verbose=1):
"""Initialization with encryption key."""
self.filepath = filepath
self._ENC_KEY = key
self.verbose = verbose
self.save_model = save_model
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch."""
# self.save_model.set_weights(self.model.get_weights())
fname = self.filepath.format(epoch=epoch + 1)
fname = save_model(self.model, fname, str.encode(self._ENC_KEY), '.hdf5')
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, fname))
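# Illustrative usage sketch; the key string is a placeholder. `filepath` is
# formatted with the 1-based epoch number:
#
#   saver_cb = KerasModelSaver(filepath='weights/ssd_epoch_{epoch:03d}.hdf5',
#                              key='YOUR_ENCRYPTION_KEY',
#                              save_model=model,
#                              verbose=1)
#   model.fit(..., callbacks=[saver_cb])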
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/callbacks/enc_model_saver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SSD tensorboard visualization callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import keras
import numpy as np
from PIL import Image, ImageDraw
import tensorflow as tf
from nvidia_tao_tf1.cv.common.utils import summary_from_value, TensorBoard
class SSDTensorBoard(TensorBoard):
"""Override the on_epoch_end."""
def on_epoch_end(self, epoch, logs=None):
"""on_epoch_end method."""
for name, value in logs.items():
if "AP" in name or "validation_loss" in name:
summary = summary_from_value(name, value.item())
self.writer.add_summary(summary, epoch)
self.writer.flush()
def make_image_bboxes(tensor, bboxes):
"""Convert an numpy representation image to Image protobuf."""
height, width, channel = tensor.shape
image = Image.fromarray(tensor)
draw = ImageDraw.Draw(image)
for bbox in bboxes:
draw.rectangle(
((bbox[0], bbox[1]), (bbox[2], bbox[3])),
outline="red"
)
output = io.BytesIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
return tf.Summary.Image(height=height,
width=width,
colorspace=channel,
encoded_image_string=image_string)
class SSDTensorBoardImage(keras.callbacks.Callback):
"""Add the augmented images to Tensorboard and draw bboxes on the images."""
def __init__(self, log_dir, experiment_spec, variances, num_imgs=3):
"""Init the TensorBoardImage."""
super(SSDTensorBoardImage, self).__init__()
self.img = tf.Variable(0., collections=[tf.GraphKeys.LOCAL_VARIABLES], validate_shape=False)
self.label = tf.Variable(0., collections=[tf.GraphKeys.LOCAL_VARIABLES],
validate_shape=False)
self.writer = tf.summary.FileWriter(log_dir)
self.num_imgs = num_imgs
# init the de-mean parameter
augmentation_config = experiment_spec.augmentation_config
bb, gg, rr = 103.939, 116.779, 123.68
img_mean = augmentation_config.image_mean
if img_mean:
if augmentation_config.output_channel == 3:
bb, gg, rr = img_mean['b'], img_mean['g'], img_mean['r']
else:
bb, gg, rr = img_mean['l'], img_mean['l'], img_mean['l']
if augmentation_config.output_channel == 3:
self.img_mean = np.array([[[[bb]], [[gg]], [[rr]]]])
else:
g_mean = bb * 0.1140 + gg * 0.5870 + rr * 0.2990
self.img_mean = np.array([[[[g_mean]]]])
# init the label decode parameter:
self.variances = np.array(variances)
self.img_height = experiment_spec.augmentation_config.output_height
self.img_width = experiment_spec.augmentation_config.output_width
def _deprocess_imgs(self, augmented_imgs):
"""Deprocess the images."""
# add mean and cast to uint8
augmented_imgs = (augmented_imgs + self.img_mean).astype("uint8")
# NCHW -> NHWC:
augmented_imgs = augmented_imgs.transpose(0, 2, 3, 1)
# BGR -> RGB:
augmented_imgs = augmented_imgs[:, :, :, ::-1]
return augmented_imgs
def _decode_label(self, labels):
"""Decode the labels."""
# labels shape: [batch_size, num_anchors, encoded_label]
final_gt_anchors = []
for label_per_img in labels:
# pick up the highest IOU anchors
gt_anchors = label_per_img[label_per_img[:, -1] == 1024.0, :]
cx_gt = gt_anchors[:, -12]
cy_gt = gt_anchors[:, -11]
w_gt = gt_anchors[:, -10]
h_gt = gt_anchors[:, -9]
cx_anchor = gt_anchors[:, -8]
cy_anchor = gt_anchors[:, -7]
w_anchor = gt_anchors[:, -6]
h_anchor = gt_anchors[:, -5]
cx_variance = self.variances[np.newaxis, -4]
cy_variance = self.variances[np.newaxis, -3]
variance_w = self.variances[np.newaxis, -2]
variance_h = self.variances[np.newaxis, -1]
# Convert anchor box offsets to image offsets.
cx = cx_gt * cx_variance * w_anchor + cx_anchor
cy = cy_gt * cy_variance * h_anchor + cy_anchor
w = np.exp(w_gt * variance_w) * w_anchor
h = np.exp(h_gt * variance_h) * h_anchor
# Convert 'centroids' to 'corners'.
xmin = cx - 0.5 * w
ymin = cy - 0.5 * h
xmax = cx + 0.5 * w
ymax = cy + 0.5 * h
xmin = np.expand_dims(xmin * self.img_width, axis=-1)
ymin = np.expand_dims(ymin * self.img_height, axis=-1)
xmax = np.expand_dims(xmax * self.img_width, axis=-1)
ymax = np.expand_dims(ymax * self.img_height, axis=-1)
decoded_anchors = np.concatenate((xmin, ymin, xmax, ymax), axis=-1)
decoded_anchors.astype(np.int32)
final_gt_anchors.append(decoded_anchors)
return final_gt_anchors
def on_batch_end(self, batch, logs=None):
"""On batch end method."""
if batch != 0:
return
augmented_imgs = keras.backend.get_value(self.img)
labels = keras.backend.get_value(self.label)
augmented_imgs = self._deprocess_imgs(augmented_imgs)
labels = self._decode_label(labels)
cnt = 0
summary_values = []
for img, label in zip(augmented_imgs, labels):
if cnt >= self.num_imgs:
break
tb_img = make_image_bboxes(img, label)
summary_values.append(tf.Summary.Value(tag=f"batch_imgs/{cnt}", image=tb_img))
cnt += 1
summary = tf.Summary(value=summary_values)
self.writer.add_summary(summary, batch)
self.writer.flush()
def on_train_end(self, *args, **kwargs):
"""on_train_end method."""
self.writer.close()
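# Illustrative usage sketch; `spec` is assumed to be a parsed experiment spec
# with an `augmentation_config`, the `log_dir` keyword of `SSDTensorBoard` is
# assumed from the Keras TensorBoard API, and the training loop is expected
# to assign each batch's images/labels to the callback's `img` and `label`
# variables:
#
#   tb_cb = SSDTensorBoard(log_dir='logs/')
#   tb_img_cb = SSDTensorBoardImage(log_dir='logs/',
#                                   experiment_spec=spec,
#                                   variances=[0.1, 0.1, 0.2, 0.2],
#                                   num_imgs=3)
#   model.fit(..., callbacks=[tb_cb, tb_img_cb])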
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/callbacks/tb_callback.py |
'''
Copyright (C) 2018 Pierluigi Ferrari.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
def convert_coordinates(tensor, start_index, conversion, border_pixels='half'):
'''
Convert coordinates for axis-aligned 2D boxes between two coordinate formats.
Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
three supported coordinate formats that can be converted from and to each other:
1) (xmin, xmax, ymin, ymax) - the 'minmax' format
2) (xmin, ymin, xmax, ymax) - the 'corners' format
    3) (cx, cy, w, h) - the 'centroids' format
Arguments:
tensor (array): A Numpy nD array containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
        conversion (str): The conversion direction. Can be 'minmax2centroids',
'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners',
or 'corners2minmax'.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
            If 'half', then one of each of the two horizontal and vertical borders belongs
            to the boxes, but not the other.
Returns:
A Numpy nD array, a copy of the input tensor with the converted coordinates
in place of the original coordinates and the unaltered elements of the original
tensor elsewhere.
'''
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1
    elif border_pixels == 'exclude':
        d = -1
    else:
        raise ValueError(
            "`border_pixels` must be one of 'half', 'include' or 'exclude'.")
ind = start_index
    tensor1 = np.copy(tensor).astype(np.float64)  # `np.float` is a deprecated alias.
if conversion == 'minmax2centroids':
tensor1[..., ind] = (tensor[..., ind] +
tensor[..., ind+1]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+2] +
tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+1] - \
tensor[..., ind] + d # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - \
tensor[..., ind+2] + d # Set h
elif conversion == 'centroids2minmax':
tensor1[..., ind] = tensor[..., ind] - \
tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind] + \
tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+2] = tensor[..., ind+1] - \
tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+3] = tensor[..., ind+1] + \
tensor[..., ind+3] / 2.0 # Set ymax
elif conversion == 'corners2centroids':
tensor1[..., ind] = (tensor[..., ind] +
tensor[..., ind+2]) / 2.0 # Set cx
tensor1[..., ind+1] = (tensor[..., ind+1] +
tensor[..., ind+3]) / 2.0 # Set cy
tensor1[..., ind+2] = tensor[..., ind+2] - \
tensor[..., ind] + d # Set w
tensor1[..., ind+3] = tensor[..., ind+3] - \
tensor[..., ind+1] + d # Set h
elif conversion == 'centroids2corners':
tensor1[..., ind] = tensor[..., ind] - \
tensor[..., ind+2] / 2.0 # Set xmin
tensor1[..., ind+1] = tensor[..., ind+1] - \
tensor[..., ind+3] / 2.0 # Set ymin
tensor1[..., ind+2] = tensor[..., ind] + \
tensor[..., ind+2] / 2.0 # Set xmax
tensor1[..., ind+3] = tensor[..., ind+1] + \
tensor[..., ind+3] / 2.0 # Set ymax
elif conversion in ('minmax2corners', 'corners2minmax'):
tensor1[..., ind+1] = tensor[..., ind+2]
tensor1[..., ind+2] = tensor[..., ind+1]
else:
raise ValueError(
"Unexpected conversion value.")
return tensor1
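# Worked example for `convert_coordinates` (with border_pixels='half', d = 0):
# the 'corners' box (xmin=10, ymin=20, xmax=30, ymax=60) becomes, in
# 'centroids' format, cx = (10 + 30) / 2 = 20, cy = (20 + 60) / 2 = 40,
# w = 30 - 10 = 20 and h = 60 - 20 = 40:
#
#   box = np.array([[10., 20., 30., 60.]])
#   convert_coordinates(box, start_index=0, conversion='corners2centroids')
#   # -> array([[20., 40., 20., 40.]])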
def convert_coordinates2(tensor, start_index, conversion):
'''
A matrix multiplication implementation of `convert_coordinates()`.
Supports only conversion between the 'centroids' and 'minmax' formats.
This function is marginally slower on average than `convert_coordinates()`,
probably because it involves more (unnecessary) arithmetic operations (unnecessary
because the two matrices are sparse).
For details please refer to the documentation of `convert_coordinates()`.
'''
ind = start_index
    tensor1 = np.copy(tensor).astype(np.float64)  # `np.float` is a deprecated alias.
if conversion == 'minmax2centroids':
M = np.array([[0.5, 0., -1., 0.],
[0.5, 0., 1., 0.],
[0., 0.5, 0., -1.],
[0., 0.5, 0., 1.]])
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
elif conversion == 'centroids2minmax':
M = np.array([[1., 1., 0., 0.],
[0., 0., 1., 1.],
[-0.5, 0.5, 0., 0.],
[0., 0., -0.5, 0.5]]) # The multiplicative inverse of the matrix above
tensor1[..., ind:ind+4] = np.dot(tensor1[..., ind:ind+4], M)
else:
raise ValueError(
"Unexpected conversion value.")
return tensor1
def intersection_area(boxes1, boxes2,
coords='centroids',
mode='outer_product',
border_pixels='half'):
'''
Computes the intersection areas of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
of the `mode` argument for details.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2:
raise ValueError(
"boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2:
raise ValueError(
"boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1:
boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1:
boxes2 = np.expand_dims(boxes2, axis=0)
# Convert the coordinates if necessary.
if coords == 'centroids':
boxes1 = convert_coordinates(
boxes1, start_index=0, conversion='centroids2corners')
boxes2 = convert_coordinates(
boxes2, start_index=0, conversion='centroids2corners')
coords = 'corners'
elif not (coords in {'minmax', 'corners'}):
raise ValueError(
"Unexpected value for `coords`.")
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1
    elif border_pixels == 'exclude':
        d = -1
    else:
        raise ValueError(
            "`border_pixels` must be one of 'half', 'include' or 'exclude'.")
# Compute the intersection areas.
if mode == 'outer_product':
# For all possible box combinations, get the greater xmin and ymin values.
# This is a tensor of shape (m,n,2).
min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:, [xmin, ymin]],
axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmin, ymin]],
axis=0), reps=(m, 1, 1)))
# For all possible box combinations, get the smaller xmax and ymax values.
# This is a tensor of shape (m,n,2).
max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:, [xmax, ymax]],
axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmax, ymax]],
axis=0), reps=(m, 1, 1)))
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, :, 0] * side_lengths[:, :, 1]
min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])
max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, 0] * side_lengths[:, 1]
def intersection_area_(boxes1, boxes2, coords='corners',
mode='outer_product', border_pixels='half'):
'''The same as 'intersection_area()' without all the safety checks.'''
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1
elif border_pixels == 'exclude':
d = -1
# Compute the intersection areas.
if mode == 'outer_product':
# For all possible box combinations, get the greater xmin and ymin values.
# This is a tensor of shape (m,n,2).
min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:, [xmin, ymin]],
axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmin, ymin]],
axis=0), reps=(m, 1, 1)))
# For all possible box combinations, get the smaller xmax and ymax values.
# This is a tensor of shape (m,n,2).
max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:, [xmax, ymax]],
axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmax, ymax]],
axis=0), reps=(m, 1, 1)))
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, :, 0] * side_lengths[:, :, 1]
min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])
max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, 0] * side_lengths[:, 1]
def iou(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
'''
Computes the intersection-over-union similarity.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
In 'outer_product' mode, returns an `(m,n)` matrix with the IoUs for all possible
combinations of the boxes in `boxes1` and `boxes2`.
In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
of the `mode` argument for details.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2:
raise ValueError(
"boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2:
raise ValueError(
"boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1:
boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1:
boxes2 = np.expand_dims(boxes2, axis=0)
# Convert the coordinates if necessary.
if coords == 'centroids':
boxes1 = convert_coordinates(
boxes1, start_index=0, conversion='centroids2corners')
boxes2 = convert_coordinates(
boxes2, start_index=0, conversion='centroids2corners')
coords = 'corners'
elif not (coords in {'minmax', 'corners'}):
raise ValueError(
"Unexpected value for `coords`.")
# Compute the IoU.
    # Compute the intersection areas, using the same border-pixel convention
    # as for the union areas below.
    intersection_areas = intersection_area_(
        boxes1, boxes2, coords=coords, mode=mode, border_pixels=border_pixels)
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Compute the union areas.
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
    if border_pixels == 'half':
        d = 0
    elif border_pixels == 'include':
        d = 1
    elif border_pixels == 'exclude':
        d = -1
    else:
        raise ValueError(
            "`border_pixels` must be one of 'half', 'include' or 'exclude'.")
if mode == 'outer_product':
boxes1_areas = np.tile(np.expand_dims((boxes1[:, xmax] - boxes1[:, xmin] + d) * (
boxes1[:, ymax] - boxes1[:, ymin] + d), axis=1), reps=(1, n))
boxes2_areas = np.tile(np.expand_dims((boxes2[:, xmax] - boxes2[:, xmin] + d) * (
boxes2[:, ymax] - boxes2[:, ymin] + d), axis=0), reps=(m, 1))
elif mode == 'element-wise':
boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin] + d) * \
(boxes1[:, ymax] - boxes1[:, ymin] + d)
boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin] + d) * \
(boxes2[:, ymax] - boxes2[:, ymin] + d)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
def iou_clean(boxes1, boxes2):
'''
numpy version of vectorized iou.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
returns an `(m, n)` matrix with the IoUs for all pairwise boxes
Arguments:
boxes1 (array of shape (m, 4)): x_min, y_min, x_max, y_max
boxes2 (array of shape (n, 4)): x_min, y_min, x_max, y_max
Returns:
IOU (array of shape (m, n)): IOU score
'''
# Compute the IoU.
xmin1, ymin1, xmax1, ymax1 = np.split(boxes1, 4, axis=1)
xmin2, ymin2, xmax2, ymax2 = np.split(boxes2, 4, axis=1)
xmin = np.maximum(xmin1, xmin2.T)
ymin = np.maximum(ymin1, ymin2.T)
xmax = np.minimum(xmax1, xmax2.T)
ymax = np.minimum(ymax1, ymax2.T)
intersection = np.maximum(xmax - xmin, 0) * np.maximum(ymax - ymin, 0)
boxes1_areas = (xmax1 - xmin1) * (ymax1 - ymin1)
boxes2_areas = (xmax2 - xmin2) * (ymax2 - ymin2)
union = boxes1_areas + boxes2_areas.T - intersection
return intersection / union
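# Worked example for `iou_clean`: two unit squares overlapping in a quarter
# of their area give IoU = 0.25 / (1 + 1 - 0.25) = 1/7 ~= 0.1429:
#
#   boxes_a = np.array([[0., 0., 1., 1.]])
#   boxes_b = np.array([[0.5, 0.5, 1.5, 1.5]])
#   iou_clean(boxes_a, boxes_b)  # -> array([[0.14285714]])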
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/bounding_box_utils.py |
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TF implementation of SSD label encoder.
Code partially from GitHub (Apache v2 license):
https://github.com/pierluigiferrari/ssd_keras/tree/3ac9adaf3889f1020d74b0eeefea281d5e82f353
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.ssd.utils.box_utils import (
bipartite_match_row,
corners_to_centroids,
iou, multi_match
)
from nvidia_tao_tf1.cv.ssd.utils.box_utils import np_convert_coordinates
from nvidia_tao_tf1.cv.ssd.utils.tensor_utils import tensor_slice_replace, tensor_strided_replace
class SSDInputEncoder:
'''
Encoder class.
Transforms ground truth labels for object detection in images
(2D bounding box coordinates and class labels) to the format required for
training an SSD model.
In the process of encoding the ground truth labels, a template of anchor boxes
is being built, which are subsequently matched to the ground truth boxes
via an intersection-over-union threshold criterion.
'''
def __init__(self,
img_height,
img_width,
n_classes,
predictor_sizes,
min_scale=0.1,
max_scale=0.9,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=None,
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
normalize_coords=True,
gt_normalized=False):
'''
Init encoder.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
predictor_sizes (list): A list of int-tuples of the format `(height, width)`
containing the output heights and widths of the convolutional predictor layers.
min_scale (float, optional): The smallest scaling factor for the size of the anchor
boxes as a fraction of the shorter side of the input images. Note that you should
set the scaling factors such that the resulting anchor box sizes correspond to the
sizes of the objects you are trying to detect. Must be >0.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes
as a fraction of the shorter side of the input images. All scaling factors between
the smallest and the largest will be linearly interpolated. Note that the second to
last of the linearly interpolated scaling factors will actually be the scaling
factor for the last predictor layer, while the last scaling factor is used for the
second box for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is
`True`. Note that you should set the scaling factors such that the resulting anchor
box sizes correspond to the sizes of the objects you are trying to detect. Must be
greater than or equal to `min_scale`.
scales (list, optional): A list of floats >0 containing scaling factors per
convolutional predictor layer. This list must be one element longer than the number
of predictor layers. The first `k` elements are the scaling factors for the `k`
predictor layers, while the last element is used for the second box for aspect ratio
1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional last
scaling factor must be passed either way, even if it is not being used. If a list is
passed, this argument overrides `min_scale` and `max_scale`. All scaling factors
must be greater than zero. Note that you should set the scaling factors such that
the resulting anchor box sizes correspond to the sizes of the objects you are trying
to detect.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes
are to be generated. This list is valid for all prediction layers. Note that you
should set the aspect ratios such that the resulting anchor box shapes roughly
correspond to the shapes of the objects you are trying to detect.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for
each prediction layer. If a list is passed, it overrides `aspect_ratios_global`.
Note that you should set the aspect ratios such that the resulting anchor box shapes
very roughly correspond to the shapes of the objects you are trying to detect.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratios lists that contain
1. Will be ignored otherwise. If `True`, two anchor boxes will be generated for
aspect ratio 1. The first will be generated using the scaling factor for the
respective layer, the second one will be generated using geometric mean of said
scaling factor and next bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either ints/floats or tuples of two ints/floats. These
numbers represent for each predictor layer how many pixels apart the anchor box
center points should be vertically and horizontally along the spatial grid over the
image. If the list contains ints/floats, then that value will be used for both
spatial dimensions. If the list contains tuples of two ints/floats, then they
represent `(step_height, step_width)`. If no steps are provided, then they will be
computed such that the anchor box center points will form an equidistant grid within
the image dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor
layers. The elements can be either floats or tuples of two floats. These numbers
represent for each predictor layer how many pixels from the top and left borders of
the image the top-most and left-most anchor box center points should be as a
fraction of `steps`. The last bit is important: The offsets are not absolute pixel
values, but fractions of the step size specified in the `steps` argument. If the
list contains floats, then that value will be used for both spatial dimensions. If
the list contains tuples of two floats, then they represent
`(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will
default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, limits the anchor box coordinates to stay within
image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each
coordinate will be divided by its respective variance value.
pos_iou_threshold (float, optional): The intersection-over-union similarity threshold
that must be met in order to match a given ground truth box to a given anchor box.
neg_iou_limit (float, optional): The maximum allowed intersection-over-union similarity
of an anchor box with any ground truth box to be labeled a negative
(i.e. background) box. If an anchor box is neither a positive, nor a negative box,
it will be ignored during training.
normalize_coords (bool, optional): If `True`, the encoder uses relative instead of
absolute coordinates. This means instead of using absolute target coordinates, the
encoder will scale all coordinates to be within [0,1]. This way learning becomes
independent of the input image size.
'''
predictor_sizes = np.array(predictor_sizes)
if predictor_sizes.ndim == 1:
predictor_sizes = np.expand_dims(predictor_sizes, axis=0)
##################################################################################
# Handle exceptions.
##################################################################################
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if (len(scales) != predictor_sizes.shape[0] + 1):
# Must be two nested `if` statements since `list` and `bool` can't be combined by &
raise ValueError("It must be either scales is None or len(scales) == \
len(predictor_sizes)+1, but len(scales) == {} and len(predictor_sizes)+1 == {}"
.format(len(scales), len(predictor_sizes)+1))
scales = np.array(scales)
if np.any(scales <= 0):
raise ValueError("All values in `scales` must be greater than 0, but the passed \
list of scales is {}".format(scales))
else:
# If no scales passed, we make sure that `min_scale` and `max_scale` are valid values.
if not 0 < min_scale <= max_scale:
raise ValueError("It must be 0 < min_scale <= max_scale, but it is min_scale = {} \
and max_scale = {}".format(min_scale, max_scale))
if not (aspect_ratios_per_layer is None):
if (len(aspect_ratios_per_layer) != predictor_sizes.shape[0]):
# Must be two nested `if` statements since `list` and `bool` can't be combined by &
raise ValueError("It must be either aspect_ratios_per_layer is None or \
len(aspect_ratios_per_layer) == len(predictor_sizes), but len(aspect_ratios_per_layer) == {} \
and len(predictor_sizes) == {}".format(len(aspect_ratios_per_layer), len(predictor_sizes)))
for aspect_ratios in aspect_ratios_per_layer:
if np.any(np.array(aspect_ratios) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
else:
if (aspect_ratios_global is None):
raise ValueError("At least one of `aspect_ratios_global` and \
`aspect_ratios_per_layer` must not be `None`.")
if np.any(np.array(aspect_ratios_global) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received."
.format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}"
.format(variances))
if (not (steps is None)) and (len(steps) != predictor_sizes.shape[0]):
raise ValueError("You must provide one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != predictor_sizes.shape[0]):
raise ValueError("You must provide one offset value per predictor layer.")
##################################################################################
# Set or compute members.
##################################################################################
self.img_height = float(img_height)
self.img_width = float(img_width)
self.n_classes = n_classes
self.predictor_sizes = predictor_sizes
self.min_scale = min_scale
self.max_scale = max_scale
# If `scales` is None, compute the scaling factors by linearly interpolating between
# `min_scale` and `max_scale`. If an explicit list of `scales` is given, however,
# then it takes precedent over `min_scale` and `max_scale`.
if (scales is None):
self.scales = np.linspace(self.min_scale, self.max_scale, len(self.predictor_sizes)+1)
else:
# If a list of scales is given explicitly, we'll use that instead of computing it from
# `min_scale` and `max_scale`.
self.scales = scales
# If `aspect_ratios_per_layer` is None, then we use the same list of aspect ratios
# `aspect_ratios_global` for all predictor layers. If `aspect_ratios_per_layer` is given,
# however, then it takes precedent over `aspect_ratios_global`.
if (aspect_ratios_per_layer is None):
self.aspect_ratios = [aspect_ratios_global] * predictor_sizes.shape[0]
else:
# If aspect ratios are given per layer, we'll use those.
self.aspect_ratios = aspect_ratios_per_layer
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (steps is None):
self.steps = steps
else:
self.steps = [None] * predictor_sizes.shape[0]
if not (offsets is None):
self.offsets = offsets
else:
self.offsets = [None] * predictor_sizes.shape[0]
self.clip_boxes = clip_boxes
self.variances = variances
self.pos_iou_threshold = pos_iou_threshold
self.neg_iou_limit = neg_iou_limit
self.normalize_coords = normalize_coords
self.gt_normalized = gt_normalized
# Compute the number of boxes per spatial location for each predictor layer.
# For example, if a predictor layer has three different aspect ratios, [1.0, 0.5, 2.0], and
# is supposed to predict two boxes of slightly different size for aspect ratio 1.0, then
# that predictor layer predicts a total of four boxes at every spatial location across the
# feature map.
if not (aspect_ratios_per_layer is None):
self.n_boxes = []
for aspect_ratios in aspect_ratios_per_layer:
if (1 in aspect_ratios) & two_boxes_for_ar1:
self.n_boxes.append(len(aspect_ratios) + 1)
else:
self.n_boxes.append(len(aspect_ratios))
else:
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios_global) + 1
else:
self.n_boxes = len(aspect_ratios_global)
##################################################################################
# Compute the anchor boxes for each predictor layer.
##################################################################################
# Compute the anchor boxes for each predictor layer. We only have to do this once
# since the anchor boxes depend only on the model configuration, not on the input data.
# For each predictor layer (i.e. for each scaling factor) the tensors for that layer's
# anchor boxes will have the shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
boxes_list = [] # This will store the anchor boxes for each predictor layer.
boxes_corner = []
# Iterate over all predictor layers and compute the anchor boxes for each one.
for i in range(len(self.predictor_sizes)):
boxes, box_corner = self.__anchor_layer(feature_map_size=self.predictor_sizes[i],
aspect_ratios=self.aspect_ratios[i],
this_scale=self.scales[i],
next_scale=self.scales[i+1],
this_steps=self.steps[i],
this_offsets=self.offsets[i])
boxes_list.append(boxes)
boxes_corner.append(box_corner)
anchor_box_list_np = np.concatenate([i.reshape((-1, 4)) for i in boxes_corner], axis=0)
self.anchorbox_tensor = tf.convert_to_tensor(anchor_box_list_np, dtype=tf.float32)
self.encoding_template_tensor = tf.convert_to_tensor(self.__encode_template(boxes_list),
dtype=tf.float32)
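# Note (explanatory, based on the code above): `self.anchorbox_tensor` holds
# the anchors in corner format `(xmin, ymin, xmax, ymax)` for IoU matching,
# while `boxes_list` (and hence `self.encoding_template_tensor`) holds the
# same anchors in centroid format `(cx, cy, w, h)` for the offset encoding.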
def __anchor_layer(self,
feature_map_size,
aspect_ratios,
this_scale,
next_scale,
this_steps=None,
this_offsets=None):
'''
Generate numpy anchors for each layer.
Computes an array of the spatial positions and sizes of the anchor boxes for one predictor
layer of size `feature_map_size == [feature_map_height, feature_map_width]`.
Arguments:
feature_map_size (tuple): A list or tuple `[feature_map_height, feature_map_width]` with
the spatial dimensions of the feature map for which to generate the anchor boxes.
aspect_ratios (list): A list of floats, the aspect ratios for which anchor boxes are to
be generated. All list elements must be unique.
this_scale (float): A float in [0, 1], the scaling factor for the size of the generated
anchor boxes as a fraction of the shorter side of the input image.
next_scale (float): A float in [0, 1], the next larger scaling factor. Only relevant if
`self.two_boxes_for_ar1 == True`.
Returns:
A tuple of two 4D np tensors, each of shape
`(feature_map_height, feature_map_width, n_boxes_per_cell, 4)`: the first
holds the anchors as `(cx, cy, w, h)`, the second as `(xmin, ymin, xmax, ymax)`.
'''
# Compute box width and height for each aspect ratio.
# The shorter side of the image will be used to compute `w` and `h` using `scale` and
# `aspect_ratios`.
size = min(self.img_height, self.img_width)
# Compute the box widths and heights for all aspect ratios
wh_list = []
for ar in aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean of this scale
# value and the next.
box_height = box_width = np.sqrt(this_scale * next_scale) * size
wh_list.append((box_width, box_height))
else:
box_width = this_scale * size * np.sqrt(ar)
box_height = this_scale * size / np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
n_boxes = len(wh_list)
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes, i.e. how far apart the anchor box center points will be vertically
# and horizontally.
if (this_steps is None):
step_height = self.img_height / feature_map_size[0]
step_width = self.img_width / feature_map_size[1]
else:
if isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2):
step_height = this_steps[0]
step_width = this_steps[1]
elif isinstance(this_steps, (int, float)):
step_height = this_steps
step_width = this_steps
# Compute the offsets, i.e. at what pixel values the first anchor box center point will be
# from the top and from the left of the image.
if (this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2):
offset_height = this_offsets[0]
offset_width = this_offsets[1]
elif isinstance(this_offsets, (int, float)):
offset_height = this_offsets
offset_width = this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height,
(offset_height + feature_map_size[0] - 1) * step_height,
feature_map_size[0])
cx = np.linspace(offset_width * step_width,
(offset_width + feature_map_size[1] - 1) * step_width, feature_map_size[1])
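# Worked example (illustrative numbers): with img_height = 300, a feature
# map of height 3 and the default offset 0.5, step_height = 100 and
# cy = linspace(50, 250, 3) = [50, 150, 250].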
cx_grid, cy_grid = np.meshgrid(cx, cy)
# This is necessary for np.tile() to do what we want further down
cx_grid = np.expand_dims(cx_grid, -1)
# This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1)
# Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros((feature_map_size[0], feature_map_size[1], n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = np_convert_coordinates(boxes_tensor,
start_index=0,
conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:, :, :, [0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:, :, :, [0, 2]] = x_coords
y_coords = boxes_tensor[:, :, :, [1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:, :, :, [1, 3]] = y_coords
# If `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
box_tensor_corner = np.array(boxes_tensor)
# Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
boxes_tensor = np_convert_coordinates(boxes_tensor,
start_index=0,
conversion='corners2centroids')
return boxes_tensor, box_tensor_corner
def __encode_template(self, boxes_list):
# Tile the anchor boxes for each predictor layer across all batch items.
boxes_batch = []
for boxes in boxes_list:
boxes = np.reshape(boxes, (-1, 4))
boxes_batch.append(boxes)
# Concatenate the anchor tensors from the individual layers to one.
boxes_tensor = np.concatenate(boxes_batch, axis=0)
# 3: Create a template tensor to hold the one-hot class encodings of shape
# `(batch, #boxes, #classes)`. It will contain all zeros for now, the classes will be set in
# the matching process that follows
classes_tensor = np.zeros((boxes_tensor.shape[0], self.n_classes))
# @TODO(tylerz): Setting all the boxes to be background by default
classes_tensor[:, 0] = 1
# 4: Create a tensor to contain the variances. This tensor has the same shape as
# `boxes_tensor` and simply contains the same 4 variance values for every position in the
# last axis.
variances_tensor = np.zeros_like(boxes_tensor)
variances_tensor += self.variances # Long live broadcasting
self.variances_tensor = tf.convert_to_tensor(variances_tensor, dtype=tf.float32)
# 4: Concatenate the classes, boxes and variances tensors to get our final template for
# y_encoded. We also need another tensor of the shape of `boxes_tensor` as a space filler
# so that `y_encoding_template` has the same shape as the SSD model output tensor. The
# content of this tensor is irrelevant, we'll just use `boxes_tensor` a second time.
y_encoding_template = np.concatenate((classes_tensor,
boxes_tensor, boxes_tensor, variances_tensor), axis=1)
return y_encoding_template
def __call__(self, ground_truth_labels):
'''
Converts ground truth bounding box data into a suitable format to train an SSD model.
Arguments:
ground_truth_labels (list): A python list of length `batch_size` that contains one 2D
tensor or Numpy array for each batch image. Each such array has `k` rows for the `k` ground
truth bounding boxes belonging to the respective image, and the data for each ground
truth bounding box has the format `(class_id, xmin, ymin, xmax, ymax)` (i.e. the
'corners' coordinate format), and `class_id` must be an integer greater than 0 for
all boxes as class ID 0 is reserved for the background class.
Returns:
`y_encoded`, a 3D tensor of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)` that
serves as the ground truth label tensor for training, where `#boxes` is the total number
of boxes predicted by the model per image, and the classes are one-hot-encoded. The four
elements after the class vectors in the last axis are the box coordinates, the next four
elements after that are just dummy elements, and the last four elements are the
variances.
'''
y_encoded = []
##################################################################################
# Match ground truth boxes to anchor boxes.
##################################################################################
# Match the ground truth boxes to the anchor boxes. Every anchor box that does not have
# a ground truth match and for which the maximal IoU overlap with any ground truth box is
# less than or equal to `neg_iou_limit` will be a negative (background) box.
for gt_label in ground_truth_labels: # For each batch item...
match_y = tf.cond(tf.equal(tf.shape(gt_label)[0], 0),
lambda: self.encoding_template_tensor,
lambda label=gt_label: self.__calc_matched_anchor_gt(label))
y_encoded.append(match_y)
y_encoded = tf.stack(y_encoded, axis=0)
##################################################################################
# Convert box coordinates to anchor box offsets.
##################################################################################
y_encoded = self.__tf_convert_anchor_to_offset(y_encoded)
return y_encoded
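# Layout of the last 12 columns of `y_encoded` at this point (see
# `__encode_template` and `__calc_matched_anchor_gt` below):
#   [..., -12:-8] matched ground truth box as `(cx, cy, w, h)`
#                 (or the anchor itself where nothing was matched),
#   [..., -8:-4]  anchor box as `(cx, cy, w, h)`,
#   [..., -4:]    the four variances.
# The method below rewrites `[..., -12:-8]` in place with the standard SSD
# regression targets:
#   t_cx = (cx_gt - cx_anchor) / (w_anchor * var_cx)
#   t_cy = (cy_gt - cy_anchor) / (h_anchor * var_cy)
#   t_w  = log(w_gt / w_anchor) / var_w
#   t_h  = log(h_gt / h_anchor) / var_h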
def __tf_convert_anchor_to_offset(self, tensor):
return tensor_strided_replace(tensor, (-12, -8), tf.concat([
tf.truediv(tensor[..., -12:-10] - tensor[..., -8:-6],
tensor[..., -6:-4] * self.variances_tensor[..., -4:-2]),
tf.truediv(tf.log(tf.truediv(tensor[..., -10:-8], tensor[..., -6:-4])),
self.variances_tensor[..., -2:])
], axis=-1), axis=-1)
def __calc_matched_anchor_gt(self, gt_label):
gt_label = tf.cast(gt_label, tf.float32)
# Maybe normalize the box coordinates.
confs = tf.gather(gt_label, tf.constant(0, dtype=tf.int32), axis=-1)
if self.normalize_coords:
# gt_label is [class_id, xmin, ymin, xmax, ymax]
"""
gt_label = tf.stack([gt_label[:, 0],
tf.truediv(gt_label[:, 1], self.img_width),
tf.truediv(gt_label[:, 2], self.img_height),
tf.truediv(gt_label[:, 3], self.img_width),
tf.truediv(gt_label[:, 4], self.img_height),
], axis=-1)
"""
if not self.gt_normalized:
x_mins = tf.gather(gt_label, tf.constant(1, dtype=tf.int32), axis=-1)
y_mins = tf.gather(gt_label, tf.constant(2, dtype=tf.int32), axis=-1)
x_maxs = tf.gather(gt_label, tf.constant(3, dtype=tf.int32), axis=-1)
y_maxs = tf.gather(gt_label, tf.constant(4, dtype=tf.int32), axis=-1)
gt_label = tf.stack([confs,
tf.truediv(x_mins, self.img_width),
tf.truediv(y_mins, self.img_height),
tf.truediv(x_maxs, self.img_width),
tf.truediv(y_maxs, self.img_height)], axis=-1)
"""
classes_one_hot = tf.one_hot(tf.reshape(tf.cast(gt_label[:, 0], tf.int32), [-1]),
self.n_classes)
"""
classes_one_hot = tf.one_hot(tf.reshape(tf.cast(confs, tf.int32), [-1]),
self.n_classes)
# Compute the IoU similarities between all anchor boxes and all ground truth boxes for this
# batch item. This is a matrix of shape `(num_ground_truth_boxes, num_anchor_boxes)`.
gt_xys = tf.gather(gt_label, tf.range(1, 5), axis=-1)
# similarities = iou(gt_label[..., 1:], self.anchorbox_tensor)
similarities = iou(gt_xys, self.anchorbox_tensor)
# Maybe convert the box coordinate format.
# gt_label = corners_to_centroids(gt_label, start_index=1)
gt_centroid = corners_to_centroids(gt_label, start_index=1)
gt_centroid = tf.gather(gt_centroid, tf.range(1, 5), axis=-1)
# labels_one_hot = tf.concat([classes_one_hot, gt_label[:, 1:]], axis=-1)
labels_one_hot = tf.concat([classes_one_hot, gt_centroid], axis=-1)
# First: Do bipartite matching, i.e. match each ground truth box to the one anchor box with
# the highest IoU.
# This ensures that each ground truth box will have at least one good match.
# For each ground truth box, get the anchor box to match with it.
bipartite_matches = bipartite_match_row(similarities)
# Write the ground truth data to the matched anchor boxes.
y_encoded_cls_box = self.encoding_template_tensor[:, :-8]
match_y = tensor_slice_replace(y_encoded_cls_box,
labels_one_hot,
bipartite_matches,
tf.range(tf.shape(labels_one_hot)[0]))
# Write the highest IOU flag:
end_flag = tf.expand_dims(self.encoding_template_tensor[:, -1], -1)
end_flag_on = tf.fill(tf.shape(end_flag), 1024.0)
end_flag = tensor_slice_replace(end_flag,
end_flag_on,
bipartite_matches,
bipartite_matches)
# Set the columns of the matched anchor boxes to zero to indicate that they were matched.
sim_trans = tf.transpose(similarities)
sim_trans_zero = tf.zeros_like(sim_trans)
sim_trans_replace = tensor_slice_replace(sim_trans,
sim_trans_zero,
bipartite_matches,
bipartite_matches)
similarities = tf.transpose(sim_trans_replace)
# Second: Maybe do 'multi' matching, where each remaining anchor box will be matched to its
# most similar ground truth box with an IoU of at least `pos_iou_threshold`, or not
# matched if there is no such ground truth box.
# Get all matches that satisfy the IoU threshold.
matches = multi_match(similarities, self.pos_iou_threshold)
# Write the ground truth data to the matched anchor boxes.
match_y = tensor_slice_replace(match_y,
labels_one_hot,
matches[1],
matches[0])
# Set the columns of the matched anchor boxes to zero to indicate that they were matched.
sim_trans_replace = tensor_slice_replace(sim_trans_replace,
sim_trans_zero,
matches[1],
matches[1])
similarities = tf.transpose(sim_trans_replace)
# Third: Now after the matching is done, all negative (background) anchor boxes that have
# an IoU of `neg_iou_limit` or more with any ground truth box will be set to neutral,
# i.e. they will no longer be background boxes. These anchors are "too close" to a
# ground truth box to be valid background boxes.
max_background_similarities = tf.reduce_max(similarities, axis=0)
neutral_boxes = tf.reshape(tf.where(max_background_similarities >= self.neg_iou_limit),
[-1])
neutral_boxes = tf.cast(neutral_boxes, tf.int32)
match_y_bg_only = tf.expand_dims(match_y[:, 0], -1)
match_y_bg_only_zero = tf.zeros_like(match_y_bg_only)
match_y_bg_only = tensor_slice_replace(match_y_bg_only,
match_y_bg_only_zero,
neutral_boxes,
neutral_boxes)
match_y = tensor_strided_replace(match_y, (0, 1), match_y_bg_only, axis=-1)
match_y = tf.concat([match_y, self.encoding_template_tensor[:, -8:-1], end_flag], axis=-1)
return match_y
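# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). All parameter
# values below are hypothetical; the encoder emits TF1-style graph tensors,
# so the returned value would be evaluated inside a session.
def _demo_ssd_input_encoder():
    """Encode one batch item with hypothetical anchor settings."""
    encoder = SSDInputEncoder(
        img_height=300,
        img_width=300,
        n_classes=3,  # includes the background class (ID 0)
        predictor_sizes=[(38, 38), (19, 19)],
        scales=[0.1, 0.35, 0.6],  # len(predictor_sizes) + 1 values
        aspect_ratios_global=[1.0, 2.0, 0.5],
        variances=[0.1, 0.1, 0.2, 0.2])
    # One image with a single ground truth box given as
    # (class_id, xmin, ymin, xmax, ymax) in pixel coordinates.
    gt = tf.constant([[1.0, 30.0, 40.0, 120.0, 200.0]])
    # Resulting shape: (batch_size=1, #anchors, n_classes + 4 + 4 + 4).
    return encoder([gt])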
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/input_encoder.py |
'''
An encoder that converts ground truth annotations to SSD-compatible training targets.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
from nvidia_tao_tf1.cv.ssd.box_coder.bounding_box_utils import convert_coordinates, iou_clean
from nvidia_tao_tf1.cv.ssd.box_coder.matching_utils import match_bipartite_greedy, match_multi
class SSDInputEncoderNP:
'''
Transforms ground truth labels for object detection.
In the process of encoding the ground truth labels, a template of anchor boxes
is built; these anchors are subsequently matched to the ground truth boxes
via an intersection-over-union threshold criterion.
'''
def __init__(self,
img_height,
img_width,
n_classes,
predictor_sizes,
min_scale=0.1,
max_scale=0.9,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=None,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
border_pixels='half',
normalize_coords=True,
background_id=0):
'''init.'''
predictor_sizes = np.array(predictor_sizes)
if predictor_sizes.ndim == 1:
predictor_sizes = np.expand_dims(predictor_sizes, axis=0)
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError(
"Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
# Must be two nested `if` statements since `list` and `bool` cannot be combined by `&`
if (len(scales) != predictor_sizes.shape[0] + 1):
raise ValueError("It must be either scales is None or \
len(scales) == len(predictor_sizes)+1")
scales = np.array(scales)
if np.any(scales <= 0):
raise ValueError(
"All values in `scales` must be greater than 0")
else:
if not 0 < min_scale <= max_scale:
raise ValueError("It must be 0 < min_scale <= max_scale")
if not (aspect_ratios_per_layer is None):
# Must be two nested `if` statements since `list` and `bool` cannot be combined by `&`
if (len(aspect_ratios_per_layer) != predictor_sizes.shape[0]):
raise ValueError("It must be either aspect_ratios_per_layer is None or \
len(aspect_ratios_per_layer) == len(predictor_sizes), \
but len(aspect_ratios_per_layer) == {} and \
len(predictor_sizes) == {}".format(
len(aspect_ratios_per_layer), len(predictor_sizes)))
for aspect_ratios in aspect_ratios_per_layer:
if np.any(np.array(aspect_ratios) <= 0):
raise ValueError(
"All aspect ratios must be greater than zero.")
else:
if (aspect_ratios_global is None):
raise ValueError(
"At least one of `aspect_ratios_global` \
and `aspect_ratios_per_layer` must not be `None`.")
if np.any(np.array(aspect_ratios_global) <= 0):
raise ValueError(
"All aspect ratios must be greater than zero.")
if len(variances) != 4:
raise ValueError(
"4 variance values must be pased,\
but {} values were received.".format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError(
"All variances must be >0, but the variances given are {}".format(variances))
if (not (steps is None)) and (len(steps) != predictor_sizes.shape[0]):
raise ValueError(
"You must provide one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != predictor_sizes.shape[0]):
raise ValueError(
"You must provide one offset value per predictor layer.")
##################################################################################
# Set or compute members.
##################################################################################
self.img_height = img_height
self.img_width = img_width
self.n_classes = n_classes
self.predictor_sizes = predictor_sizes
self.min_scale = min_scale
self.max_scale = max_scale
# If `scales` is None, compute the scaling factors by linearly interpolating between
# `min_scale` and `max_scale`. If an explicit list of `scales` is given, however,
# then it takes precedent over `min_scale` and `max_scale`.
if (scales is None):
self.scales = np.linspace(
self.min_scale, self.max_scale, len(self.predictor_sizes)+1)
else:
self.scales = scales
# If `aspect_ratios_per_layer` is None, then we use the same list of aspect ratios
# `aspect_ratios_global` for all predictor layers. If `aspect_ratios_per_layer` is given,
# however, then it takes precedent over `aspect_ratios_global`.
if (aspect_ratios_per_layer is None):
self.aspect_ratios = [aspect_ratios_global] * \
predictor_sizes.shape[0]
else:
# If aspect ratios are given per layer, we'll use those.
self.aspect_ratios = aspect_ratios_per_layer
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (steps is None):
self.steps = steps
else:
self.steps = [None] * predictor_sizes.shape[0]
if not (offsets is None):
self.offsets = offsets
else:
self.offsets = [None] * predictor_sizes.shape[0]
self.clip_boxes = clip_boxes
self.variances = variances
self.matching_type = matching_type
self.pos_iou_threshold = pos_iou_threshold
self.neg_iou_limit = neg_iou_limit
self.border_pixels = border_pixels
self.normalize_coords = normalize_coords
self.background_id = background_id
# Compute the number of boxes per spatial location for each predictor layer.
# For example, if a predictor layer has three different aspect ratios,
# [1.0, 0.5, 2.0], and is
# supposed to predict two boxes of slightly different size for aspect
# ratio 1.0, then that predictor
# layer predicts a total of four boxes at every spatial location across the feature map.
if not (aspect_ratios_per_layer is None):
self.n_boxes = []
for aspect_ratios in aspect_ratios_per_layer:
if (1 in aspect_ratios) & two_boxes_for_ar1:
self.n_boxes.append(len(aspect_ratios) + 1)
else:
self.n_boxes.append(len(aspect_ratios))
else:
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios_global) + 1
else:
self.n_boxes = len(aspect_ratios_global)
##################################################################################
# Compute the anchor boxes for each predictor layer.
##################################################################################
# Compute the anchor boxes for each predictor layer. We only have to do this once
# since the anchor boxes depend only on the model configuration, not on the input data.
# For each predictor layer (i.e. for each scaling factor) the tensors for that layer's
# anchor boxes will have the shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
# This will store the anchor boxes for each predictor layer.
self.boxes_list = []
# The following lists just store diagnostic information. Sometimes it's handy to have the
# boxes' center points, heights, widths, etc. in a list.
self.wh_list_diag = [] # Box widths and heights for each predictor layer
# Horizontal and vertical distances between any two boxes for each predictor layer
self.steps_diag = []
self.offsets_diag = [] # Offsets for each predictor layer
# Anchor box center points as `(cy, cx)` for each predictor layer
self.centers_diag = []
# Iterate over all predictor layers and compute the anchor boxes for each one.
for i in range(len(self.predictor_sizes)):
out = self.generate_anchor_boxes_for_layer(feature_map_size=self.predictor_sizes[i],
aspect_ratios=self.aspect_ratios[i],
this_scale=self.scales[i],
next_scale=self.scales[i+1],
this_steps=self.steps[i],
this_offsets=self.offsets[i],
diagnostics=True)
boxes, center, wh, step, offset = out
self.boxes_list.append(boxes)
self.wh_list_diag.append(wh)
self.steps_diag.append(step)
self.offsets_diag.append(offset)
self.centers_diag.append(center)
self.y_encoding_template = np.ascontiguousarray(
self.generate_encoding_template(diagnostics=False))
def __call__(self, ground_truth_labels, diagnostics=False):
'''Converts ground truth bounding box data into a suitable format to train an SSD model.'''
# Mapping to define which indices represent which coordinates in the ground truth.
class_id = 0
xmin = 1
ymin = 2
xmax = 3
ymax = 4
# batch_size = len(ground_truth_labels)
##################################################################################
# Generate the template for y_encoded.
##################################################################################
y_encoded = np.copy(self.y_encoding_template)
##################################################################################
# Match ground truth boxes to anchor boxes.
##################################################################################
# All boxes are background boxes by default.
y_encoded[:, self.background_id] = 1
# The total number of boxes that the model predicts per batch item
# An identity matrix that we'll use as one-hot class vectors
class_vectors = np.eye(self.n_classes)
# If there is no ground truth for this batch item, there is nothing to match.
if ground_truth_labels.size != 0:
labels = ground_truth_labels.astype(
float) # The labels for this batch item
# Check for degenerate ground truth bounding boxes before attempting any computations.
if np.any(labels[:, [xmax]] - labels[:, [xmin]] <= 0) or \
np.any(labels[:, [ymax]] - labels[:, [ymin]] <= 0):
raise DegenerateBoxError(
"SSDInputEncoder detected degenerate ground truth bounding "
"boxes {}, i.e. bounding boxes where xmax <= xmin and/or "
"ymax <= ymin. Degenerate ground truth bounding boxes will "
"lead to NaN during training.".format(labels))
# Maybe normalize the box coordinates.
if self.normalize_coords:
# Normalize ymin and ymax relative to the image height
labels[:, [ymin, ymax]] /= self.img_height
# Normalize xmin and xmax relative to the image width
labels[:, [xmin, xmax]] /= self.img_width
# The one-hot class IDs for the ground truth boxes of this batch item
classes_one_hot = class_vectors[labels[:, class_id].astype(int)]
# The one-hot version of the labels for this batch item
labels_one_hot = np.concatenate(
[classes_one_hot, labels[:, [xmin, ymin, xmax, ymax]]], axis=-1)
# Compute the IoU similarities between all anchor boxes
# and all ground truth boxes for this batch item.
# This is a matrix of shape `(num_ground_truth_boxes, num_anchor_boxes)`.
similarities = iou_clean(labels[:, [xmin, ymin, xmax, ymax]], y_encoded[:, -12:-8])
# First: Do bipartite matching, i.e. match each ground truth box
# to the one anchor box with the highest IoU.
# This ensures that each ground truth box will have at least one good match.
# For each ground truth box, get the anchor box to match with it.
bipartite_matches = match_bipartite_greedy(
weight_matrix=similarities)
# Write the ground truth data to the matched anchor boxes.
y_encoded[bipartite_matches, :-8] = labels_one_hot
# Write the highest IOU flag
y_encoded[bipartite_matches, -1] = 1024
# Set the columns of the matched anchor boxes to
# zero to indicate that they were matched.
similarities[:, bipartite_matches] = 0
# Second: Maybe do 'multi' matching, where each remaining anchor
# box will be matched to its most similar
# ground truth box with an IoU of at least `pos_iou_threshold`,
# or not matched if there is no
# such ground truth box.
if self.matching_type == 'multi':
# Get all matches that satisfy the IoU threshold.
matches = match_multi(
weight_matrix=similarities, threshold=self.pos_iou_threshold)
# Write the ground truth data to the matched anchor boxes.
y_encoded[matches[1], :-8] = labels_one_hot[matches[0]]
# Set the columns of the matched anchor boxes to
# zero to indicate that they were matched.
similarities[:, matches[1]] = 0
# Third: Now after the matching is done, all negative (background)
# anchor boxes that have
# an IoU of `neg_iou_limit` or more with
# any ground truth box will be set to neutral,
# i.e. they will no longer be background boxes.
# These anchors are "too close" to a
# ground truth box to be valid background boxes.
max_background_similarities = np.amax(similarities, axis=0)
neutral_boxes = np.nonzero(
max_background_similarities >= self.neg_iou_limit)[0]
y_encoded[neutral_boxes, self.background_id] = 0
##################################################################################
# Convert box coordinates to anchor box offsets.
##################################################################################
y_encoded[:, -12:-8] = convert_coordinates(
y_encoded[:, -12:-8], start_index=0,
conversion='corners2centroids', border_pixels=self.border_pixels)
# if self.coords == 'centroids':
# cx(gt) - cx(anchor), cy(gt) - cy(anchor)
y_encoded[:, [-12, -11]] -= y_encoded[:, [-8, -7]]
# (cx(gt) - cx(anchor)) / w(anchor) / cx_variance,
# (cy(gt) - cy(anchor)) / h(anchor) / cy_variance
# y_encoded[:, [-12, -11]] /= y_encoded[:, [-6, -5]] * y_encoded[:, [-4, -3]]
y_encoded[:, [-12, -11]] /= y_encoded[:, [-6, -5]] * self.variances_tensor[:, [-4, -3]]
# w(gt) / w(anchor), h(gt) / h(anchor)
y_encoded[:, [-10, -9]] /= y_encoded[:, [-6, -5]]
# ln(w(gt) / w(anchor)) / w_variance,
# ln(h(gt) / h(anchor)) / h_variance (ln == natural logarithm)
# y_encoded[:, [-10, -9]] = np.log(y_encoded[:, [-10, -9]]) / y_encoded[:, [-2, -1]]
y_encoded[:, [-10, -9]] = \
np.log(y_encoded[:, [-10, -9]]) / self.variances_tensor[:, [-2, -1]]
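# Worked example (illustrative numbers): for an anchor
# (cx, cy, w, h) = (0.50, 0.50, 0.20, 0.20), a matched ground truth box
# (0.54, 0.50, 0.20, 0.20) and variances (0.1, 0.1, 0.2, 0.2), the encoded
# targets are t_cx = 0.04 / (0.20 * 0.1) = 2.0, t_cy = 0.0,
# t_w = ln(1.0) / 0.2 = 0.0 and t_h = 0.0.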
if diagnostics:
# Here we'll save the matched anchor boxes
# (i.e. anchor boxes that were matched to a ground truth box,
# but keeping the anchor box coordinates).
y_matched_anchors = np.copy(y_encoded)
# Keeping the anchor box coordinates means setting the offsets to zero.
y_matched_anchors[:, -12:-8] = 0
return y_encoded, y_matched_anchors
return y_encoded
def generate_anchor_boxes_for_layer(self,
feature_map_size,
aspect_ratios,
this_scale,
next_scale,
this_steps=None,
this_offsets=None,
diagnostics=False):
'''generate anchors per layer.'''
# Compute box width and height for each aspect ratio.
# The shorter side of the image will be used to compute `w` and `h`
# using `scale` and `aspect_ratios`.
size = min(self.img_height, self.img_width)
# Compute the box widths and heights for all aspect ratios
wh_list = []
for ar in aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the
# geometric mean of this scale value and the next.
box_height = box_width = np.sqrt(
this_scale * next_scale) * size
wh_list.append((box_width, box_height))
else:
box_width = this_scale * size * np.sqrt(ar)
box_height = this_scale * size / np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
n_boxes = len(wh_list)
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes,
# i.e. how far apart the anchor box center points will be vertically and horizontally.
if (this_steps is None):
step_height = self.img_height / feature_map_size[0]
step_width = self.img_width / feature_map_size[1]
else:
if isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2):
step_height = this_steps[0]
step_width = this_steps[1]
elif isinstance(this_steps, (int, float)):
step_height = this_steps
step_width = this_steps
# Compute the offsets,
# i.e. at what pixel values the first anchor box center point
# will be from the top and from the left of the image.
if (this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2):
offset_height = this_offsets[0]
offset_width = this_offsets[1]
elif isinstance(this_offsets, (int, float)):
offset_height = this_offsets
offset_width = this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height,
(offset_height +
feature_map_size[0] - 1) * step_height,
feature_map_size[0])
cx = np.linspace(offset_width * step_width,
(offset_width + feature_map_size[1] - 1) * step_width,
feature_map_size[1])
cx_grid, cy_grid = np.meshgrid(cx, cy)
# This is necessary for np.tile() to do what we want further down
cx_grid = np.expand_dims(cx_grid, -1)
# This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1)
# Create a 4D tensor template of shape
# `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros(
(feature_map_size[0], feature_map_size[1], n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = convert_coordinates(
boxes_tensor, start_index=0, conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:, :, :, [0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:, :, :, [0, 2]] = x_coords
y_coords = boxes_tensor[:, :, :, [1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:, :, :, [1, 3]] = y_coords
# If `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
if diagnostics:
return boxes_tensor, (cy, cx), wh_list, (step_height, step_width),\
(offset_height, offset_width)
return boxes_tensor
def generate_encoding_template(self, diagnostics=False):
'''
Produce the encoding template for the ground truth label tensor.
Arguments:
diagnostics (bool): If `True`, also return the anchor box diagnostics.
Returns:
A Numpy array of shape `(#boxes, #classes + 12)`.
'''
# Tile the anchor boxes for each predictor layer across all batch items.
boxes_batch = []
for boxes in self.boxes_list:
boxes = np.expand_dims(boxes, axis=0)
boxes = np.reshape(boxes, (-1, 4))
boxes_batch.append(boxes)
# Concatenate the anchor tensors from the individual layers to one.
# boxes_tensor = np.concatenate(boxes_batch, axis=1)
boxes_tensor = np.concatenate(boxes_batch, axis=0)
classes_tensor = np.zeros((boxes_tensor.shape[0], self.n_classes))
# 4: Create a tensor to contain the variances.
variances_tensor = np.zeros_like(boxes_tensor)
variances_tensor += self.variances # Long live broadcasting
self.variances_tensor = variances_tensor
# 4: Concatenate the classes, boxes and variances tensors
boxes_tensor_centroid = convert_coordinates(
boxes_tensor, start_index=0, conversion='corners2centroids')
y_encoding_template = np.concatenate(
(classes_tensor, boxes_tensor, boxes_tensor_centroid, variances_tensor), axis=1)
if diagnostics:
return y_encoding_template, self.centers_diag, \
self.wh_list_diag, self.steps_diag, self.offsets_diag
return y_encoding_template
class DegenerateBoxError(Exception):
'''An exception class to be raised if degenerate boxes are being detected.'''
pass
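# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). All parameter
# values below are hypothetical.
def _demo_ssd_input_encoder_np():
    """Encode one image's ground truth with hypothetical anchor settings."""
    encoder = SSDInputEncoderNP(
        img_height=300,
        img_width=300,
        n_classes=3,  # includes the background class (ID 0)
        predictor_sizes=[(38, 38), (19, 19)],
        scales=[0.1, 0.35, 0.6],  # len(predictor_sizes) + 1 values
        aspect_ratios_global=[1.0, 2.0, 0.5],
        variances=[0.1, 0.1, 0.2, 0.2])
    # Ground truth for a single image: (class_id, xmin, ymin, xmax, ymax).
    labels = np.array([[1.0, 30.0, 40.0, 120.0, 200.0]])
    # Resulting shape: (#anchors, n_classes + 4 + 4 + 4).
    return encoder(labels)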
class DefaultBoxes:
'''Anchor box generator (@TODO(tylerz): for use with DALI fn.box_encoder).'''
def __init__(self,
img_height,
img_width,
predictor_sizes,
min_scale=0.1,
max_scale=0.9,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
border_pixels='half'):
'''Init default boxes.'''
predictor_sizes = np.array(predictor_sizes)
if predictor_sizes.ndim == 1:
predictor_sizes = np.expand_dims(predictor_sizes, axis=0)
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError(
"Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
# Must be two nested `if` statements since `list` and `bool` cannot be combined by `&`
if (len(scales) != predictor_sizes.shape[0] + 1):
raise ValueError("It must be either scales is None or \
len(scales) == len(predictor_sizes)+1")
scales = np.array(scales)
if np.any(scales <= 0):
raise ValueError(
"All values in `scales` must be greater than 0")
else:
if not 0 < min_scale <= max_scale:
raise ValueError("It must be 0 < min_scale <= max_scale")
if not (aspect_ratios_per_layer is None):
# Must be two nested `if` statements since `list` and `bool` cannot be combined by `&`
if (len(aspect_ratios_per_layer) != predictor_sizes.shape[0]):
raise ValueError("It must be either aspect_ratios_per_layer is None or \
len(aspect_ratios_per_layer) == len(predictor_sizes), \
but len(aspect_ratios_per_layer) == {} and \
len(predictor_sizes) == {}".format(
len(aspect_ratios_per_layer), len(predictor_sizes)))
for aspect_ratios in aspect_ratios_per_layer:
if np.any(np.array(aspect_ratios) <= 0):
raise ValueError(
"All aspect ratios must be greater than zero.")
else:
if (aspect_ratios_global is None):
raise ValueError(
"At least one of `aspect_ratios_global` \
and `aspect_ratios_per_layer` must not be `None`.")
if np.any(np.array(aspect_ratios_global) <= 0):
raise ValueError(
"All aspect ratios must be greater than zero.")
if (not (steps is None)) and (len(steps) != predictor_sizes.shape[0]):
raise ValueError(
"You must provide one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != predictor_sizes.shape[0]):
raise ValueError(
"You must provide one offset value per predictor layer.")
##################################################################################
# Set or compute members.
##################################################################################
self.img_height = img_height
self.img_width = img_width
self.predictor_sizes = predictor_sizes
self.min_scale = min_scale
self.max_scale = max_scale
# If `scales` is None, compute the scaling factors by linearly interpolating between
# `min_scale` and `max_scale`. If an explicit list of `scales` is given, however,
# then it takes precedent over `min_scale` and `max_scale`.
if (scales is None):
self.scales = np.linspace(
self.min_scale, self.max_scale, len(self.predictor_sizes)+1)
else:
self.scales = scales
# If `aspect_ratios_per_layer` is None, then we use the same list of aspect ratios
# `aspect_ratios_global` for all predictor layers. If `aspect_ratios_per_layer` is given,
# however, then it takes precedent over `aspect_ratios_global`.
if (aspect_ratios_per_layer is None):
self.aspect_ratios = [aspect_ratios_global] * \
predictor_sizes.shape[0]
else:
# If aspect ratios are given per layer, we'll use those.
self.aspect_ratios = aspect_ratios_per_layer
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (steps is None):
self.steps = steps
else:
self.steps = [None] * predictor_sizes.shape[0]
if not (offsets is None):
self.offsets = offsets
else:
self.offsets = [None] * predictor_sizes.shape[0]
self.clip_boxes = clip_boxes
self.border_pixels = border_pixels
# Compute the number of boxes per spatial location for each predictor layer.
# For example, if a predictor layer has three different aspect ratios,
# [1.0, 0.5, 2.0], and is
# supposed to predict two boxes of slightly different size for aspect
# ratio 1.0, then that predictor
# layer predicts a total of four boxes at every spatial location across the feature map.
if not (aspect_ratios_per_layer is None):
self.n_boxes = []
for aspect_ratios in aspect_ratios_per_layer:
if (1 in aspect_ratios) & two_boxes_for_ar1:
self.n_boxes.append(len(aspect_ratios) + 1)
else:
self.n_boxes.append(len(aspect_ratios))
else:
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios_global) + 1
else:
self.n_boxes = len(aspect_ratios_global)
##################################################################################
# Compute the anchor boxes for each predictor layer.
##################################################################################
# Compute the anchor boxes for each predictor layer. We only have to do this once
# since the anchor boxes depend only on the model configuration, not on the input data.
# For each predictor layer (i.e. for each scaling factor) the tensors for that layer's
# anchor boxes will have the shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
# This will store the anchor boxes for each predictor layer.
self.boxes_list = []
# Iterate over all predictor layers and compute the anchor boxes for each one.
for i in range(len(self.predictor_sizes)):
boxes = self.generate_anchor_boxes_for_layer(feature_map_size=self.predictor_sizes[i],
aspect_ratios=self.aspect_ratios[i],
this_scale=self.scales[i],
next_scale=self.scales[i+1],
this_steps=self.steps[i],
this_offsets=self.offsets[i])
self.boxes_list.append(boxes)
def generate_anchor_boxes_for_layer(self,
feature_map_size,
aspect_ratios,
this_scale,
next_scale,
this_steps=None,
this_offsets=None):
'''generate anchors per layer.'''
# Compute box width and height for each aspect ratio.
# The shorter side of the image will be used to compute `w` and `h`
# using `scale` and `aspect_ratios`.
size = min(self.img_height, self.img_width)
# Compute the box widths and heights for all aspect ratios
wh_list = []
for ar in aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the
# geometric mean of this scale value and the next.
box_height = box_width = np.sqrt(
this_scale * next_scale) * size
wh_list.append((box_width, box_height))
else:
box_width = this_scale * size * np.sqrt(ar)
box_height = this_scale * size / np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
n_boxes = len(wh_list)
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes,
# i.e. how far apart the anchor box center points will be vertically and horizontally.
if (this_steps is None):
step_height = self.img_height / feature_map_size[0]
step_width = self.img_width / feature_map_size[1]
else:
if isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2):
step_height = this_steps[0]
step_width = this_steps[1]
elif isinstance(this_steps, (int, float)):
step_height = this_steps
step_width = this_steps
# Compute the offsets,
# i.e. at what pixel values the first anchor box center point
# will be from the top and from the left of the image.
if (this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2):
offset_height = this_offsets[0]
offset_width = this_offsets[1]
elif isinstance(this_offsets, (int, float)):
offset_height = this_offsets
offset_width = this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height,
(offset_height +
feature_map_size[0] - 1) * step_height,
feature_map_size[0])
cx = np.linspace(offset_width * step_width,
(offset_width + feature_map_size[1] - 1) * step_width,
feature_map_size[1])
cx_grid, cy_grid = np.meshgrid(cx, cy)
# This is necessary for np.tile() to do what we want further down
cx_grid = np.expand_dims(cx_grid, -1)
# This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1)
# Create a 4D tensor template of shape
# `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros(
(feature_map_size[0], feature_map_size[1], n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = convert_coordinates(
boxes_tensor, start_index=0, conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:, :, :, [0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:, :, :, [0, 2]] = x_coords
y_coords = boxes_tensor[:, :, :, [1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:, :, :, [1, 3]] = y_coords
# normalize the coordinates to be within [0,1]
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
return boxes_tensor
def as_ltrb_list(self):
'''Return the bboxes as ltrb list.'''
boxes_batch = []
for boxes in self.boxes_list:
boxes = np.expand_dims(boxes, axis=0)
boxes = np.reshape(boxes, (-1, 4))
boxes_batch.append(boxes)
# Concatenate the anchor tensors from the individual layers to one.
# boxes_tensor = np.concatenate(boxes_batch, axis=1)
boxes_tensor = np.concatenate(boxes_batch, axis=0)
boxes_tensor = boxes_tensor.flatten()
boxes_list = [x for x in boxes_tensor]
return boxes_list
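# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The values are
# hypothetical; the flat ltrb list is the kind of normalized anchor input a
# DALI-style box encoder would consume.
def _demo_default_boxes():
    """Generate anchor boxes and flatten them to a normalized ltrb list."""
    dboxes = DefaultBoxes(
        img_height=300,
        img_width=300,
        predictor_sizes=[(38, 38), (19, 19)],
        scales=[0.1, 0.35, 0.6],  # len(predictor_sizes) + 1 values
        aspect_ratios_global=[1.0, 2.0, 0.5])
    # With aspect ratio 1 present and `two_boxes_for_ar1=True`, each cell
    # gets 4 anchors: (38*38 + 19*19) * 4 anchors in total, flattened to a
    # Python list of length #anchors * 4.
    return dboxes.as_ltrb_list()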
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/ssd/box_coder/ssd_input_encoder.py |